This is primarily intended to simplify later patches that add various
backpointers to the structs, but in the meantime we can enjoy various
little syntactic conveniences.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_cmd_parser.c     |  12 +-
 drivers/gpu/drm/i915/i915_drv.h            |   9 ++
 drivers/gpu/drm/i915/i915_gem.c            |   9 +-
 drivers/gpu/drm/i915/i915_gem_context.c    |  19 ++-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   2 +-
 drivers/gpu/drm/i915/i915_irq.c            |  26 ++--
 drivers/gpu/drm/i915/intel_display.c       |   2 +-
 drivers/gpu/drm/i915/intel_lrc.c           | 122 ++++++++--------
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 219 +++++++++++++----------------
 drivers/gpu/drm/i915/intel_uncore.c        |   4 +-
 10 files changed, 195 insertions(+), 229 deletions(-)
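(For readers unfamiliar with the to_i915() magic: it uses
__builtin_types_compatible_p() to pick the conversion at compile time,
so teaching it about another struct is just one more else-if branch, as
in the i915_drv.h hunk below. A rough standalone sketch of the idiom,
using placeholder types rather than the driver's structs:

struct foo { int x; };
struct bar { struct foo *base; };

/* Pick a conversion at compile time from the pointer's static type;
 * "foo"/"bar" are placeholders, not the i915 structs. */
#define to_foo(p) ({ \
	struct foo *__f; \
	if (__builtin_types_compatible_p(typeof(*(p)), struct foo)) \
		__f = (struct foo *)(p); \
	else if (__builtin_types_compatible_p(typeof(*(p)), struct bar)) \
		__f = ((struct bar *)(p))->base; \
	else \
		__f = NULL; /* the real macro uses BUILD_BUG() instead */ \
	__f; \
})

Since __builtin_types_compatible_p() yields an integer constant, the
compiler folds the chain down to the single matching conversion; the
casts exist only so that every branch typechecks for every caller, and
the real macro turns an unknown type into a build error via BUILD_BUG().)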
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 2c50142be559..efbba19bb0b2 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -696,12 +696,12 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
 	int cmd_table_count;
 	int ret;
 
-	if (!IS_GEN7(engine->dev))
+	if (!IS_GEN7(engine))
 		return 0;
 
 	switch (engine->id) {
 	case RCS:
-		if (IS_HASWELL(engine->dev)) {
+		if (IS_HASWELL(engine)) {
 			cmd_tables = hsw_render_ring_cmds;
 			cmd_table_count =
 				ARRAY_SIZE(hsw_render_ring_cmds);
@@ -713,7 +713,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
 		engine->reg_table = gen7_render_regs;
 		engine->reg_count = ARRAY_SIZE(gen7_render_regs);
 
-		if (IS_HASWELL(engine->dev)) {
+		if (IS_HASWELL(engine)) {
 			engine->master_reg_table = hsw_master_regs;
 			engine->master_reg_count = ARRAY_SIZE(hsw_master_regs);
 		} else {
@@ -729,7 +729,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
 		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
 		break;
 	case BCS:
-		if (IS_HASWELL(engine->dev)) {
+		if (IS_HASWELL(engine)) {
 			cmd_tables = hsw_blt_ring_cmds;
 			cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
 		} else {
@@ -740,7 +740,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
 		engine->reg_table = gen7_blt_regs;
 		engine->reg_count = ARRAY_SIZE(gen7_blt_regs);
 
-		if (IS_HASWELL(engine->dev)) {
+		if (IS_HASWELL(engine)) {
 			engine->master_reg_table = hsw_master_regs;
 			engine->master_reg_count = ARRAY_SIZE(hsw_master_regs);
 		} else {
@@ -968,7 +968,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
 	if (!engine->needs_cmd_parser)
 		return false;
 
-	if (!USES_PPGTT(engine->dev))
+	if (!USES_PPGTT(engine))
 		return false;
 
 	return (i915.enable_cmd_parser == 1);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d6840d380ca5..85c8d93cbb0f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1987,6 +1987,11 @@ static inline struct drm_i915_private *__guc_to_i915(struct intel_guc *guc)
 	return container_of(guc, struct drm_i915_private, guc);
 }
 
+static inline struct drm_i915_private *__engine_to_i915(struct intel_engine_cs *engine)
+{
+	return __to_i915(engine->dev);
+}
+
 /* Iterate over initialised rings */
 #define for_each_engine(ring__, ptr__, i__) \
 	for ((i__) = 0; (i__) < I915_NUM_ENGINES; (i__)++) \
@@ -2473,6 +2478,10 @@ struct drm_i915_cmd_table {
 		__p = __guc_to_i915((struct intel_guc *)p); \
 	else if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_gem_request)) \
 		__p = __request_to_i915((struct drm_i915_gem_request *)(p)); \
+	else if (__builtin_types_compatible_p(typeof(*p), struct intel_engine_cs)) \
+		__p = __engine_to_i915((struct intel_engine_cs *)(p)); \
+	else if (__builtin_types_compatible_p(typeof(*p), struct intel_ringbuffer)) \
+		__p = __engine_to_i915(((struct intel_ringbuffer *)(p))->engine); \
 	else \
 		BUILD_BUG(); \
 	__p; \
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 7e98cf884972..045c609e8e9e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1242,8 +1242,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 			struct intel_rps_client *rps)
 {
 	struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(req);
 	const bool irq_test_in_progress =
 		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
 	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
@@ -2545,7 +2544,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 		return;
 
 	engine = request->engine;
-	dev_priv = request->i915;
+	dev_priv = to_i915(request);
 	ringbuf = request->ringbuf;
 
 	/*
@@ -2686,7 +2685,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
			 struct intel_context *ctx,
			 struct drm_i915_gem_request **req_out)
 {
-	struct drm_i915_private *dev_priv = to_i915(engine->dev);
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	struct drm_i915_gem_request *req;
 	int ret;
 
@@ -2767,7 +2766,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	int err;
 
 	if (ctx == NULL)
-		ctx = to_i915(engine->dev)->kernel_context;
+		ctx = to_i915(engine)->kernel_context;
 	err = __i915_gem_request_alloc(engine, ctx, &req);
 	return err ? ERR_PTR(err) : req;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index ccaa106f6936..8ee8eff055d5 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -107,11 +107,11 @@ static size_t get_context_alignment(struct drm_device *dev)
 
 static int get_context_size(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;
 	u32 reg;
 
-	switch (INTEL_INFO(dev)->gen) {
+	switch (INTEL_INFO(dev_priv)->gen) {
 	case 6:
 		reg = I915_READ(CXT_SIZE);
 		ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
@@ -617,12 +617,12 @@ static inline bool should_skip_switch(struct intel_engine_cs *engine,
 static bool
 needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 
 	if (!to->ppgtt)
 		return false;
 
-	if (INTEL_INFO(engine->dev)->gen < 8)
+	if (INTEL_INFO(dev_priv)->gen < 8)
 		return true;
 
 	if (engine != &dev_priv->engine[RCS])
@@ -635,12 +635,12 @@ static bool
 needs_pd_load_post(struct intel_engine_cs *engine, struct intel_context *to,
		   u32 hw_flags)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 
 	if (!to->ppgtt)
 		return false;
 
-	if (!IS_GEN8(engine->dev))
+	if (!IS_GEN8(dev_priv))
 		return false;
 
 	if (engine != &dev_priv->engine[RCS])
@@ -656,13 +656,12 @@ static int do_switch(struct drm_i915_gem_request *req)
 {
 	struct intel_context *to = req->ctx;
 	struct intel_engine_cs *engine = req->engine;
-	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_context *from = engine->last_context;
 	u32 hw_flags = 0;
 	bool uninitialized = false;
 	int ret, i;
 
-	if (from != NULL && engine == &dev_priv->engine[RCS]) {
+	if (from != NULL && engine->id == RCS) {
 		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
 		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
 	}
@@ -671,7 +670,7 @@ static int do_switch(struct drm_i915_gem_request *req)
 		return 0;
 
 	/* Trying to pin first makes error handling easier. */
-	if (engine == &dev_priv->engine[RCS]) {
+	if (engine->id == RCS) {
 		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
					    get_context_alignment(engine->dev),
					    0);
@@ -700,7 +699,7 @@ static int do_switch(struct drm_i915_gem_request *req)
 		to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
 	}
 
-	if (engine != &dev_priv->engine[RCS]) {
+	if (engine->id != RCS) {
 		if (from)
 			i915_gem_context_unreference(from);
 		goto done;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 28614c4ecbc5..f093fd40c07a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -720,7 +720,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
 	struct i915_address_space *vm;
 	struct list_head ordered_vmas;
 	struct list_head pinned_vmas;
-	bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4;
+	bool has_fenced_gpu_access = INTEL_INFO(engine)->gen < 4;
 	int retry;
 
 	i915_gem_retire_requests_ring(engine);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8f3e3309c3ab..2a376e49e153 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2827,12 +2827,11 @@ static struct intel_engine_cs *
 semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
				 u64 offset)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	struct intel_engine_cs *signaller;
 	int i;
 
-	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
-		for_each_engine(signaller, dev_priv, i) {
+	if (INTEL_INFO(engine)->gen >= 8) {
+		for_each_engine(signaller, engine, i) {
 			if (engine == signaller)
 				continue;
 
@@ -2842,7 +2841,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
 	} else {
 		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
 
-		for_each_engine(signaller, dev_priv, i) {
+		for_each_engine(signaller, engine, i) {
 			if(engine == signaller)
 				continue;
 
@@ -2860,7 +2859,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
 static struct intel_engine_cs *
 semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	u32 cmd, ipehr, head;
 	u64 offset = 0;
 	int i, backwards;
@@ -2898,7 +2897,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
	 * ringbuffer itself.
	 */
 	head = I915_READ_HEAD(engine) & HEAD_ADDR;
-	backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4;
+	backwards = (INTEL_INFO(engine)->gen >= 8) ? 5 : 4;
 
 	for (i = backwards; i; --i) {
 		/*
@@ -2920,7 +2919,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 		return NULL;
 
 	*seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
-	if (INTEL_INFO(engine->dev)->gen >= 8) {
+	if (INTEL_INFO(engine)->gen >= 8) {
 		offset = ioread32(engine->buffer->virtual_start + head + 12);
 		offset <<= 32;
 		offset = ioread32(engine->buffer->virtual_start + head + 8);
@@ -2930,7 +2929,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 
 static int semaphore_passed(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	struct intel_engine_cs *signaller;
 	u32 seqno;
 
@@ -3014,8 +3013,7 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
 static enum intel_ring_hangcheck_action
 ring_stuck(struct intel_engine_cs *engine, u64 acthd)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	enum intel_ring_hangcheck_action ha;
 	u32 tmp;
 
@@ -3023,7 +3021,7 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
 	if (ha != HANGCHECK_HUNG)
 		return ha;
 
-	if (IS_GEN2(dev))
+	if (IS_GEN2(engine))
 		return HANGCHECK_HUNG;
 
 	/* Is the chip hanging on a WAIT_FOR_EVENT?
@@ -3033,19 +3031,19 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
	 */
 	tmp = I915_READ_CTL(engine);
 	if (tmp & RING_WAIT) {
-		i915_handle_error(dev, false,
+		i915_handle_error(engine->dev, false,
				  "Kicking stuck wait on %s",
				  engine->name);
 		I915_WRITE_CTL(engine, tmp);
 		return HANGCHECK_KICK;
 	}
 
-	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
+	if (INTEL_INFO(engine)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
 		switch (semaphore_passed(engine)) {
 		default:
 			return HANGCHECK_HUNG;
 		case 1:
-			i915_handle_error(dev, false,
+			i915_handle_error(engine->dev, false,
					  "Kicking stuck semaphore on %s",
					  engine->name);
 			I915_WRITE_CTL(engine, tmp);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 74b0165238dc..d6191b3c53c7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11231,7 +11231,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
 	if (engine == NULL)
 		return true;
 
-	if (INTEL_INFO(engine->dev)->gen < 5)
+	if (INTEL_INFO(engine)->gen < 5)
 		return false;
 
 	if (i915.use_mmio_flip < 0)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 0789f4581f7d..5e3246b05a1e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -268,19 +268,18 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
 static void
 logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-
-	if (IS_GEN8(dev) || IS_GEN9(dev))
+	if (IS_GEN8(engine) || IS_GEN9(engine))
 		engine->idle_lite_restore_wa = ~0;
 
-	engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-					IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
-					(engine->id == VCS || engine->id == VCS2);
+	engine->disable_lite_restore_wa =
+		(IS_SKL_REVID(engine, 0, SKL_REVID_B0) ||
+		 IS_BXT_REVID(engine, 0, BXT_REVID_A1)) &&
+		(engine->id == VCS || engine->id == VCS2);
 
 	engine->ctx_desc_template = GEN8_CTX_VALID;
-	engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
-				   GEN8_CTX_ADDRESSING_MODE_SHIFT;
-	if (IS_GEN8(dev))
+	engine->ctx_desc_template |=
+		GEN8_CTX_ADDRESSING_MODE(engine) << GEN8_CTX_ADDRESSING_MODE_SHIFT;
+	if (IS_GEN8(engine))
 		engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
 
 	engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
@@ -397,9 +396,8 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
 
 static void execlists_update_context(struct drm_i915_gem_request *rq)
 {
-	struct intel_engine_cs *engine = rq->engine;
 	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
-	uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
+	uint32_t *reg_state = rq->ctx->engine[rq->engine->id].lrc_reg_state;
 
 	reg_state[CTX_RING_TAIL+1] = rq->tail;
 
@@ -408,14 +406,14 @@ static void execlists_update_context(struct drm_i915_gem_request *rq)
	 * PML4 is allocated during ppgtt init, so this is not needed
	 * in 48-bit mode.
	 */
-	if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
+	if (ppgtt && !USES_FULL_48BIT_PPGTT(rq))
 		execlists_update_context_pdps(ppgtt, reg_state);
 }
 
 static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
				      struct drm_i915_gem_request *rq1)
 {
-	struct drm_i915_private *dev_priv = rq0->i915;
+	struct drm_i915_private *dev_priv = to_i915(rq0);
 
 	/* BUG_ON(!irqs_disabled());  */
 
@@ -444,7 +442,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
	 * If irqs are not active generate a warning as batches that finish
	 * without the irqs may get lost and a GPU Hang may occur.
	 */
-	WARN_ON(!intel_irqs_enabled(engine->dev->dev_private));
+	WARN_ON(!intel_irqs_enabled(to_i915(engine)));
 
 	/* Try to read in pairs */
 	list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
@@ -519,7 +517,7 @@ static u32
 get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
		   u32 *context_id)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	u32 status;
 
 	read_pointer %= GEN8_CSB_ENTRIES;
@@ -544,7 +542,7 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
  */
 void intel_lrc_irq_handler(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	u32 status_pointer;
 	unsigned int read_pointer, write_pointer;
 	u32 csb[GEN8_CSB_ENTRIES][2];
@@ -1051,14 +1049,14 @@ void intel_execlists_retire_requests(struct intel_engine_cs *engine)
 
 void intel_logical_ring_stop(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	int ret;
 
 	if (!intel_engine_initialized(engine))
 		return;
 
 	ret = intel_engine_idle(engine);
-	if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error))
+	if (ret && !i915_reset_in_progress(&to_i915(engine)->gpu_error))
 		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  engine->name, ret);
 
@@ -1090,8 +1088,7 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
 static int intel_lr_context_do_pin(struct intel_context *ctx,
				   struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = ctx->i915;
 	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
 	struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
 	struct page *lrc_state_page;
@@ -1248,7 +1245,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
	 * this batch updates GEN8_L3SQCREG4 with default value we need to
	 * set this bit here to retain the WA during flush.
	 */
-	if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0))
+	if (IS_SKL_REVID(engine, 0, SKL_REVID_E0))
 		l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
 
 	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
@@ -1337,7 +1334,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
 	wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 
 	/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
-	if (IS_BROADWELL(engine->dev)) {
+	if (IS_BROADWELL(engine)) {
 		int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
 		if (rc < 0)
 			return rc;
@@ -1409,12 +1406,11 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
				    uint32_t *offset)
 {
 	int ret;
-	struct drm_device *dev = engine->dev;
 	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
 	/* WaDisableCtxRestoreArbitration:skl,bxt */
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
-	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+	if (IS_SKL_REVID(engine, 0, SKL_REVID_D0) ||
+	    IS_BXT_REVID(engine, 0, BXT_REVID_A1))
 		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 
 	/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
@@ -1435,12 +1431,11 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
			       uint32_t *const batch,
			       uint32_t *offset)
 {
-	struct drm_device *dev = engine->dev;
 	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
 	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-	    IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+	if (IS_SKL_REVID(engine, 0, SKL_REVID_B0) ||
+	    IS_BXT_REVID(engine, 0, BXT_REVID_A1)) {
 		wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
 		wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
 		wa_ctx_emit(batch, index,
@@ -1449,8 +1444,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
 	}
 
 	/* WaDisableCtxRestoreArbitration:skl,bxt */
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
-	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+	if (IS_SKL_REVID(engine, 0, SKL_REVID_D0) ||
+	    IS_BXT_REVID(engine, 0, BXT_REVID_A1))
 		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
 
 	wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
@@ -1500,9 +1495,9 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 	WARN_ON(engine->id != RCS);
 
 	/* update this when WA for higher Gen are added */
-	if (INTEL_INFO(engine->dev)->gen > 9) {
+	if (INTEL_INFO(engine)->gen > 9) {
 		DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
-			  INTEL_INFO(engine->dev)->gen);
+			  INTEL_INFO(engine)->gen);
 		return 0;
 	}
 
@@ -1522,7 +1517,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 	batch = kmap_atomic(page);
 	offset = 0;
 
-	if (INTEL_INFO(engine->dev)->gen == 8) {
+	if (INTEL_INFO(engine)->gen == 8) {
 		ret = gen8_init_indirectctx_bb(engine,
					       &wa_ctx->indirect_ctx,
					       batch,
@@ -1536,7 +1531,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
					   &offset);
 		if (ret)
 			goto out;
-	} else if (INTEL_INFO(engine->dev)->gen == 9) {
+	} else if (INTEL_INFO(engine)->gen == 9) {
 		ret = gen9_init_indirectctx_bb(engine,
					       &wa_ctx->indirect_ctx,
					       batch,
@@ -1562,8 +1557,7 @@ out:
 
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned int next_context_status_buffer_hw;
 
 	lrc_setup_hardware_status_page(engine,
@@ -1611,8 +1605,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
 
 static int gen8_init_render_ring(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	int ret;
 
 	ret = gen8_init_common_ring(engine);
@@ -1717,8 +1710,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 
 static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1737,8 +1729,7 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
 
 static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1755,8 +1746,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
 {
 	struct intel_ringbuffer *ringbuf = request->ringbuf;
 	struct intel_engine_cs *engine = ringbuf->engine;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(request);
 	uint32_t cmd;
 	int ret;
 
@@ -1824,7 +1814,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
		 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
		 * pipe control.
		 */
-		if (IS_GEN9(engine->dev))
+		if (IS_GEN9(request))
			vf_flush_wa = true;
 	}
 
@@ -2014,7 +2004,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 	if (!intel_engine_initialized(engine))
 		return;
 
-	dev_priv = engine->dev->dev_private;
+	dev_priv = to_i915(engine);
 
 	if (engine->buffer) {
 		intel_logical_ring_stop(engine);
@@ -2292,7 +2282,7 @@ cleanup_render_ring:
 }
 
 static u32
-make_rpcs(struct drm_device *dev)
+make_rpcs(struct intel_engine_cs *engine)
 {
 	u32 rpcs = 0;
 
@@ -2300,7 +2290,7 @@ make_rpcs(struct drm_device *dev)
	 * No explicit RPCS request is needed to ensure full
	 * slice/subslice/EU enablement prior to Gen9.
	 */
-	if (INTEL_INFO(dev)->gen < 9)
+	if (INTEL_INFO(engine)->gen < 9)
 		return 0;
 
 	/*
@@ -2309,24 +2299,24 @@ make_rpcs(struct drm_device *dev)
	 * must make an explicit request through RPCS for full
	 * enablement.
*/ - if (INTEL_INFO(dev)->has_slice_pg) { + if (INTEL_INFO(engine)->has_slice_pg) { rpcs |= GEN8_RPCS_S_CNT_ENABLE; - rpcs |= INTEL_INFO(dev)->slice_total << + rpcs |= INTEL_INFO(engine)->slice_total << GEN8_RPCS_S_CNT_SHIFT; rpcs |= GEN8_RPCS_ENABLE; } - if (INTEL_INFO(dev)->has_subslice_pg) { + if (INTEL_INFO(engine)->has_subslice_pg) { rpcs |= GEN8_RPCS_SS_CNT_ENABLE; - rpcs |= INTEL_INFO(dev)->subslice_per_slice << + rpcs |= INTEL_INFO(engine)->subslice_per_slice << GEN8_RPCS_SS_CNT_SHIFT; rpcs |= GEN8_RPCS_ENABLE; } - if (INTEL_INFO(dev)->has_eu_pg) { - rpcs |= INTEL_INFO(dev)->eu_per_subslice << + if (INTEL_INFO(engine)->has_eu_pg) { + rpcs |= INTEL_INFO(engine)->eu_per_subslice << GEN8_RPCS_EU_MIN_SHIFT; - rpcs |= INTEL_INFO(dev)->eu_per_subslice << + rpcs |= INTEL_INFO(engine)->eu_per_subslice << GEN8_RPCS_EU_MAX_SHIFT; rpcs |= GEN8_RPCS_ENABLE; } @@ -2338,9 +2328,9 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine) { u32 indirect_ctx_offset; - switch (INTEL_INFO(engine->dev)->gen) { + switch (INTEL_INFO(engine)->gen) { default: - MISSING_CASE(INTEL_INFO(engine->dev)->gen); + MISSING_CASE(INTEL_INFO(engine)->gen); /* fall through */ case 9: indirect_ctx_offset = @@ -2360,8 +2350,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o struct intel_engine_cs *engine, struct intel_ringbuffer *ringbuf) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = ctx->i915; struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; struct page *page; uint32_t *reg_state; @@ -2400,7 +2389,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o RING_CONTEXT_CONTROL(engine), _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH | CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | - (HAS_RESOURCE_STREAMER(dev) ? + (HAS_RESOURCE_STREAMER(engine) ? CTX_CTRL_RS_CTX_ENABLE : 0))); ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base), 0); @@ -2471,7 +2460,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0); - if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) { + if (USES_FULL_48BIT_PPGTT(engine)) { /* 64b PPGTT (48bit canonical) * PDP0_DESCRIPTOR contains the base address to PML4 and * other PDP Descriptors are ignored. @@ -2489,7 +2478,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o if (engine->id == RCS) { reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, - make_rpcs(dev)); + make_rpcs(engine)); } kunmap_atomic(reg_state); @@ -2546,11 +2535,11 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine) { int ret = 0; - WARN_ON(INTEL_INFO(engine->dev)->gen < 8); + WARN_ON(INTEL_INFO(engine)->gen < 8); switch (engine->id) { case RCS: - if (INTEL_INFO(engine->dev)->gen >= 9) + if (INTEL_INFO(engine)->gen >= 9) ret = GEN9_LR_CONTEXT_RENDER_SIZE; else ret = GEN8_LR_CONTEXT_RENDER_SIZE; @@ -2569,7 +2558,7 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine) static void lrc_setup_hardware_status_page(struct intel_engine_cs *engine, struct drm_i915_gem_object *default_ctx_obj) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(engine); struct page *page; /* The HWSP is part of the default context object in LRC mode. 
@@ -2601,7 +2590,6 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *engine,
 int intel_lr_context_deferred_alloc(struct intel_context *ctx,
				    struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
 	struct drm_i915_gem_object *ctx_obj;
 	uint32_t context_size;
 	struct intel_ringbuffer *ringbuf;
@@ -2615,7 +2603,7 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 	/* One extra page as the sharing data between driver and GuC */
 	context_size += PAGE_SIZE * LRC_PPHWSP_PN;
 
-	ctx_obj = i915_gem_alloc_object(dev, context_size);
+	ctx_obj = i915_gem_alloc_object(engine->dev, context_size);
 	if (!ctx_obj) {
 		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 8a293a25588c..5173d52b76de 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -61,7 +61,7 @@ int intel_ring_space(struct intel_ringbuffer *ringbuf)
 
 bool intel_engine_stopped(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
 }
 
@@ -431,19 +431,19 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
 static void ring_write_tail(struct intel_engine_cs *engine,
			    u32 value)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	I915_WRITE_TAIL(engine, value);
 }
 
 u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	u64 acthd;
 
-	if (INTEL_INFO(engine->dev)->gen >= 8)
+	if (INTEL_INFO(engine)->gen >= 8)
 		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
					 RING_ACTHD_UDW(engine->mmio_base));
-	else if (INTEL_INFO(engine->dev)->gen >= 4)
+	else if (INTEL_INFO(engine)->gen >= 4)
 		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
 	else
 		acthd = I915_READ(ACTHD);
@@ -453,25 +453,24 @@ u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
 
 static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	u32 addr;
 
 	addr = dev_priv->status_page_dmah->busaddr;
-	if (INTEL_INFO(engine->dev)->gen >= 4)
+	if (INTEL_INFO(engine)->gen >= 4)
 		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
 	I915_WRITE(HWS_PGA, addr);
 }
 
 static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	i915_reg_t mmio;
 
 	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
-	if (IS_GEN7(dev)) {
+	if (IS_GEN7(engine)) {
 		switch (engine->id) {
 		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
@@ -491,7 +490,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
 		}
-	} else if (IS_GEN6(engine->dev)) {
+	} else if (IS_GEN6(engine)) {
 		mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
 	} else {
 		/* XXX: gen8 returns to sanity */
@@ -508,7 +507,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
	 * arises: do we still need this and if so how should we go about
	 * invalidating the TLB?
	 */
-	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
+	if (INTEL_INFO(engine)->gen >= 6 && INTEL_INFO(engine)->gen < 8) {
 		i915_reg_t reg = RING_INSTPM(engine->mmio_base);
 
 		/* ring should be idle before issuing a sync flush*/
@@ -526,9 +525,9 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
 
 static bool stop_ring(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = to_i915(engine->dev);
+	struct drm_i915_private *dev_priv = to_i915(engine);
 
-	if (!IS_GEN2(engine->dev)) {
+	if (!IS_GEN2(engine)) {
 		I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
 		if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
 			DRM_ERROR("%s : timed out trying to stop ring\n",
@@ -546,7 +545,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
 	I915_WRITE_HEAD(engine, 0);
 	engine->write_tail(engine, 0);
 
-	if (!IS_GEN2(engine->dev)) {
+	if (!IS_GEN2(engine)) {
 		(void)I915_READ_CTL(engine);
 		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
 	}
@@ -556,8 +555,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
 
 static int init_ring_common(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	struct intel_ringbuffer *ringbuf = engine->buffer;
 	struct drm_i915_gem_object *obj = ringbuf->obj;
 	int ret = 0;
@@ -587,7 +585,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
 		}
 	}
 
-	if (I915_NEED_GFX_HWS(dev))
+	if (I915_NEED_GFX_HWS(dev_priv))
 		intel_ring_setup_status_page(engine);
 	else
 		ring_setup_phys_status_page(engine);
@@ -644,12 +642,10 @@ out:
 
 void intel_fini_pipe_control(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-
 	if (engine->scratch.obj == NULL)
 		return;
 
-	if (INTEL_INFO(dev)->gen >= 5) {
+	if (INTEL_INFO(engine)->gen >= 5) {
 		kunmap(sg_page(engine->scratch.obj->pages->sgl));
 		i915_gem_object_ggtt_unpin(engine->scratch.obj);
 	}
@@ -794,7 +790,7 @@ static int wa_add(struct drm_i915_private *dev_priv,
 static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
				 i915_reg_t reg)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	struct i915_workarounds *wa = &dev_priv->workarounds;
 	const uint32_t index = wa->hw_whitelist_count[engine->id];
 
@@ -810,8 +806,7 @@ static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
 
 static int gen8_init_workarounds(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 
 	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
 
@@ -862,9 +857,8 @@ static int gen8_init_workarounds(struct intel_engine_cs *engine)
 
 static int bdw_init_workarounds(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	int ret;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	ret = gen8_init_workarounds(engine);
 	if (ret)
@@ -884,16 +878,15 @@ static int bdw_init_workarounds(struct intel_engine_cs *engine)
			  /* WaForceContextSaveRestoreNonCoherent:bdw */
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
-			  (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+			  (IS_BDW_GT3(engine) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
 
 	return 0;
 }
 
 static int chv_init_workarounds(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	int ret;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	ret = gen8_init_workarounds(engine);
 	if (ret)
@@ -910,8 +903,7 @@ static int chv_init_workarounds(struct intel_engine_cs *engine)
 
 static int gen9_init_workarounds(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	uint32_t tmp;
 	int ret;
 
@@ -934,14 +926,14 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
		GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
 
 	/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+	if (IS_SKL_REVID(engine, 0, SKL_REVID_B0) ||
+	    IS_BXT_REVID(engine, 0, BXT_REVID_A1))
 		WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
				  GEN9_DG_MIRROR_FIX_ENABLE);
 
 	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-	    IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+	if (IS_SKL_REVID(engine, 0, SKL_REVID_B0) ||
+	    IS_BXT_REVID(engine, 0, BXT_REVID_A1)) {
 		WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
				  GEN9_RHWO_OPTIMIZATION_DISABLE);
		/*
@@ -952,7 +944,8 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
 	}
 
 	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
-	if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER) || IS_BROXTON(dev))
+	if (IS_SKL_REVID(engine, SKL_REVID_C0, REVID_FOREVER) ||
+	    IS_BROXTON(engine))
 		WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
				  GEN9_ENABLE_YV12_BUGFIX);
 
@@ -966,20 +959,20 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
		GEN9_CCS_TLB_PREFETCH_ENABLE);
 
 	/* WaDisableMaskBasedCammingInRCC:skl,bxt */
-	if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) ||
-	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+	if (IS_SKL_REVID(engine, SKL_REVID_C0, SKL_REVID_C0) ||
+	    IS_BXT_REVID(engine, 0, BXT_REVID_A1))
 		WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
				  PIXEL_MASK_CAMMING_DISABLE);
 
 	/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
 	tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
-	if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
-	    IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
+	if (IS_SKL_REVID(engine, SKL_REVID_F0, SKL_REVID_F0) ||
+	    IS_BXT_REVID(engine, BXT_REVID_B0, REVID_FOREVER))
 		tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
 	WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
 
 	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
-	if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0))
+	if (IS_SKYLAKE(engine) || IS_BXT_REVID(engine, 0, BXT_REVID_B0))
 		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN8_SAMPLER_POWER_BYPASS_DIS);
 
@@ -1005,8 +998,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
 
 static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	u8 vals[3] = { 0, 0, 0 };
 	unsigned int i;
 
@@ -1047,9 +1039,8 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
 
 static int skl_init_workarounds(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	int ret;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	ret = gen9_init_workarounds(engine);
 	if (ret)
@@ -1060,12 +1051,12 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
	 * until D0 which is the default case so this is equivalent to
	 * !WaDisablePerCtxtPreemptionGranularityControl:skl
	 */
-	if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) {
+	if (IS_SKL_REVID(engine, SKL_REVID_E0, REVID_FOREVER)) {
 		I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
 	}
 
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
+	if (IS_SKL_REVID(engine, 0, SKL_REVID_D0)) {
 		/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
 		I915_WRITE(FF_SLICE_CS_CHICKEN2,
			   _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
 	}
 
 	/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
	 * involving this register should also be added to WA batch as required.
	 */
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_E0))
+	if (IS_SKL_REVID(engine, 0, SKL_REVID_E0))
 		/* WaDisableLSQCROPERFforOCL:skl */
 		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
			   GEN8_LQSC_RO_PERF_DIS);
 
 	/* WaEnableGapsTsvCreditFix:skl */
-	if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) {
+	if (IS_SKL_REVID(engine, SKL_REVID_C0, REVID_FOREVER)) {
 		I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
					   GEN9_GAPS_TSV_CREDIT_DISABLE));
 	}
 
 	/* WaDisablePowerCompilerClockGating:skl */
-	if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0))
+	if (IS_SKL_REVID(engine, SKL_REVID_B0, SKL_REVID_B0))
 		WA_SET_BIT_MASKED(HIZ_CHICKEN,
				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) {
+	if (IS_SKL_REVID(engine, 0, SKL_REVID_F0)) {
		/*
		 *Use Force Non-Coherent whenever executing a 3D context. This
		 * is a workaround for a possible hang in the unlikely event
@@ -1106,13 +1097,13 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
 	}
 
 	/* WaBarrierPerformanceFixDisable:skl */
-	if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
+	if (IS_SKL_REVID(engine, SKL_REVID_C0, SKL_REVID_D0))
 		WA_SET_BIT_MASKED(HDC_CHICKEN0,
				  HDC_FENCE_DEST_SLM_DISABLE |
				  HDC_BARRIER_PERFORMANCE_DISABLE);
 
 	/* WaDisableSbeCacheDispatchPortSharing:skl */
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_F0))
+	if (IS_SKL_REVID(engine, 0, SKL_REVID_F0))
 		WA_SET_BIT_MASKED(
			GEN7_HALF_SLICE_CHICKEN1,
			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1127,9 +1118,8 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
 
 static int bxt_init_workarounds(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	int ret;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	ret = gen9_init_workarounds(engine);
 	if (ret)
@@ -1137,11 +1127,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
 
 	/* WaStoreMultiplePTEenable:bxt */
 	/* This is a requirement according to Hardware specification */
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+	if (IS_BXT_REVID(engine, 0, BXT_REVID_A1))
 		I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
 
 	/* WaSetClckGatingDisableMedia:bxt */
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+	if (IS_BXT_REVID(engine, 0, BXT_REVID_A1)) {
 		I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
					    ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
 	}
@@ -1151,7 +1141,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
			  STALL_DOP_GATING_DISABLE);
 
 	/* WaDisableSbeCacheDispatchPortSharing:bxt */
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
+	if (IS_BXT_REVID(engine, 0, BXT_REVID_B0)) {
 		WA_SET_BIT_MASKED(
			GEN7_HALF_SLICE_CHICKEN1,
			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
 	}
@@ -1161,7 +1151,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
 	/* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
 	/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
 	/* WaDisableLSQCROPERFforOCL:bxt */
-	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+	if (IS_BXT_REVID(engine, 0, BXT_REVID_A1)) {
 		ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
 		if (ret)
			return ret;
@@ -1176,24 +1166,23 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
 
 int init_workarounds_ring(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 
 	WARN_ON(engine->id != RCS);
 
 	dev_priv->workarounds.count = 0;
 	dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
 
-	if (IS_BROADWELL(dev))
+	if (IS_BROADWELL(engine))
 		return bdw_init_workarounds(engine);
 
-	if (IS_CHERRYVIEW(dev))
+	if (IS_CHERRYVIEW(engine))
 		return chv_init_workarounds(engine);
 
-	if (IS_SKYLAKE(dev))
+	if (IS_SKYLAKE(engine))
 		return skl_init_workarounds(engine);
 
-	if (IS_BROXTON(dev))
+	if (IS_BROXTON(engine))
 		return bxt_init_workarounds(engine);
 
 	return 0;
@@ -1201,14 +1190,13 @@ int init_workarounds_ring(struct intel_engine_cs *engine)
 
 static int init_render_ring(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	int ret = init_ring_common(engine);
 	if (ret)
 		return ret;
 
 	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
-	if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
+	if (INTEL_INFO(engine)->gen >= 4 && INTEL_INFO(engine)->gen < 7)
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
 
 	/* We need to disable the AsyncFlip performance optimisations in order
@@ -1217,22 +1205,22 @@ static int init_render_ring(struct intel_engine_cs *engine)
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
	 */
-	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
+	if (INTEL_INFO(engine)->gen >= 6 && INTEL_INFO(engine)->gen < 8)
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
 
 	/* Required for the hardware to program scanline values for waiting */
 	/* WaEnableFlushTlbInvalidationMode:snb */
-	if (INTEL_INFO(dev)->gen == 6)
+	if (INTEL_INFO(engine)->gen == 6)
 		I915_WRITE(GFX_MODE,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
 
 	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
-	if (IS_GEN7(dev))
+	if (IS_GEN7(engine))
 		I915_WRITE(GFX_MODE_GEN7,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
 
-	if (IS_GEN6(dev)) {
+	if (IS_GEN6(engine)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 * policy. [...] This bit must be reset. LRA replacement
@@ -1242,19 +1230,18 @@ static int init_render_ring(struct intel_engine_cs *engine)
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
 	}
 
-	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
+	if (INTEL_INFO(engine)->gen >= 6 && INTEL_INFO(engine)->gen < 8)
 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-	if (HAS_L3_DPF(dev))
-		I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
+	if (HAS_L3_DPF(engine))
+		I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(engine));
 
 	return init_workarounds_ring(engine);
 }
 
 static void render_ring_cleanup(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 
 	if (dev_priv->semaphore_obj) {
 		i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
@@ -1414,8 +1401,7 @@ gen6_add_request(struct drm_i915_gem_request *req)
 static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
					      u32 seqno)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	return dev_priv->last_seqno < seqno;
+	return to_i915(dev)->last_seqno < seqno;
 }
 
 /**
@@ -1562,7 +1548,7 @@ gen6_ring_get_seqno(struct intel_engine_cs *engine, bool lazy_coherency)
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page. */
 	if (!lazy_coherency) {
-		struct drm_i915_private *dev_priv = engine->dev->dev_private;
+		struct drm_i915_private *dev_priv = to_i915(engine);
 		POSTING_READ(RING_ACTHD(engine->mmio_base));
 	}
 
@@ -1596,8 +1582,7 @@ pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno)
 static bool
 gen5_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1614,8 +1599,7 @@ gen5_ring_get_irq(struct intel_engine_cs *engine)
 static void
 gen5_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1627,8 +1611,7 @@ gen5_ring_put_irq(struct intel_engine_cs *engine)
 static bool
 i9xx_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	if (!intel_irqs_enabled(dev_priv))
@@ -1648,8 +1631,7 @@ i9xx_ring_get_irq(struct intel_engine_cs *engine)
 static void
 i9xx_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1664,8 +1646,7 @@ i9xx_ring_put_irq(struct intel_engine_cs *engine)
 static bool
 i8xx_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	if (!intel_irqs_enabled(dev_priv))
@@ -1685,8 +1666,7 @@ i8xx_ring_get_irq(struct intel_engine_cs *engine)
 static void
 i8xx_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1739,8 +1719,7 @@ i9xx_add_request(struct drm_i915_gem_request *req)
 static bool
 gen6_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1748,10 +1727,10 @@ gen6_ring_get_irq(struct intel_engine_cs *engine)
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (engine->irq_refcount++ == 0) {
-		if (HAS_L3_DPF(dev) && engine->id == RCS)
+		if (HAS_L3_DPF(engine) && engine->id == RCS)
			I915_WRITE_IMR(engine,
				       ~(engine->irq_enable_mask |
-					 GT_PARITY_ERROR(dev)));
+					 GT_PARITY_ERROR(engine)));
		else
			I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
		gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
@@ -1764,14 +1743,13 @@ gen6_ring_get_irq(struct intel_engine_cs *engine)
 static void
 gen6_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--engine->irq_refcount == 0) {
-		if (HAS_L3_DPF(dev) && engine->id == RCS)
-			I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
+		if (HAS_L3_DPF(engine) && engine->id == RCS)
+			I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(engine));
		else
			I915_WRITE_IMR(engine, ~0);
		gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
@@ -1782,8 +1760,7 @@ gen6_ring_put_irq(struct intel_engine_cs *engine)
 static bool
 hsw_vebox_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1802,8 +1779,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *engine)
 static void
 hsw_vebox_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1817,8 +1793,7 @@ hsw_vebox_put_irq(struct intel_engine_cs *engine)
 static bool
 gen8_ring_get_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
@@ -1826,7 +1801,7 @@ gen8_ring_get_irq(struct intel_engine_cs *engine)
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (engine->irq_refcount++ == 0) {
-		if (HAS_L3_DPF(dev) && engine->id == RCS) {
+		if (HAS_L3_DPF(engine) && engine->id == RCS) {
			I915_WRITE_IMR(engine,
				       ~(engine->irq_enable_mask |
					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
@@ -1843,13 +1818,12 @@ gen8_ring_get_irq(struct intel_engine_cs *engine)
 static void
 gen8_ring_put_irq(struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--engine->irq_refcount == 0) {
-		if (HAS_L3_DPF(dev) && engine->id == RCS) {
+		if (HAS_L3_DPF(engine) && engine->id == RCS) {
			I915_WRITE_IMR(engine,
				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
		} else {
@@ -1971,7 +1945,7 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
 
 static void cleanup_phys_status_page(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = to_i915(engine->dev);
+	struct drm_i915_private *dev_priv = to_i915(engine);
 
 	if (!dev_priv->status_page_dmah)
 		return;
@@ -2013,7 +1987,7 @@ static int init_status_page(struct intel_engine_cs *engine)
			goto err_unref;
 
 		flags = 0;
-		if (!HAS_LLC(engine->dev))
+		if (!HAS_LLC(engine))
			/* On g33, we cannot place HWS above 256MiB, so
			 * restrict its pinning to the low mappable arena.
			 * Though this restriction is not documented for
@@ -2047,7 +2021,7 @@ err_unref:
 
 static int init_phys_status_page(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 
 	if (!dev_priv->status_page_dmah) {
 		dev_priv->status_page_dmah =
@@ -2193,7 +2167,7 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
	 * of the buffer.
	 */
 	ring->effective_size = size;
-	if (IS_I830(engine->dev) || IS_845G(engine->dev))
+	if (IS_I830(engine) || IS_845G(engine))
 		ring->effective_size -= 2 * CACHELINE_BYTES;
 
 	ring->last_retired_head = -1;
@@ -2282,11 +2256,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
 	if (!intel_engine_initialized(engine))
 		return;
 
-	dev_priv = to_i915(engine->dev);
+	dev_priv = to_i915(engine);
 
 	if (engine->buffer) {
 		intel_stop_engine(engine);
-		WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
+		WARN_ON(!IS_GEN2(engine) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
 
 		intel_unpin_ringbuffer_obj(engine->buffer);
 		intel_ringbuffer_free(engine->buffer);
@@ -2296,7 +2270,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
 	if (engine->cleanup)
 		engine->cleanup(engine);
 
-	if (I915_NEED_GFX_HWS(engine->dev)) {
+	if (I915_NEED_GFX_HWS(engine)) {
 		cleanup_status_page(engine);
 	} else {
 		WARN_ON(engine->id != RCS);
@@ -2536,13 +2510,12 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 
 void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 
-	if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
+	if (INTEL_INFO(engine)->gen == 6 || INTEL_INFO(engine)->gen == 7) {
 		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
 		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
-		if (HAS_VEBOX(dev))
+		if (HAS_VEBOX(engine))
			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
 	}
 
@@ -2553,7 +2526,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
 static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
				     u32 value)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 
 	/* Every tail move must follow the sequence below */
 
@@ -3167,7 +3140,7 @@ intel_stop_engine(struct intel_engine_cs *engine)
 		return;
 
 	ret = intel_engine_idle(engine);
-	if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error))
+	if (ret && !i915_reset_in_progress(&to_i915(engine)->gpu_error))
 		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  engine->name, ret);
 
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 512b7faedefd..44f921be04e7 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1583,8 +1583,8 @@ static int wait_for_register_fw(struct drm_i915_private *dev_priv,
 
 static int gen8_request_engine_reset(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *dev_priv = to_i915(engine);
 	int ret;
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 
 	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
@@ -1602,7 +1602,7 @@ static int gen8_request_engine_reset(struct intel_engine_cs *engine)
 
 static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(engine);
 
 	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
		      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
-- 
2.8.0.rc3

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx