Since drm_i915_gem_request already contains a backpointer to
drm_i915_private, this is a fairly trivial operation. However, using a
consistent interface does lend convenience when we need to query device
properties, for example.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_drv.h            |  8 ++++++
 drivers/gpu/drm/i915/i915_gem.c            | 16 +++++-------
 drivers/gpu/drm/i915/i915_gem_context.c    | 21 ++++++++--------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  3 +--
 drivers/gpu/drm/i915/i915_gem_gtt.c        |  3 +--
 drivers/gpu/drm/i915/intel_lrc.c           | 24 ++++++++----------
 drivers/gpu/drm/i915/intel_mocs.c          |  2 +-
 drivers/gpu/drm/i915/intel_pm.c            |  3 +--
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 40 +++++++++++++-----------------
 9 files changed, 56 insertions(+), 64 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d5fa42c96110..d6840d380ca5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2293,6 +2293,12 @@ struct drm_i915_gem_request {
 };
 
+static inline struct drm_i915_private *
+__request_to_i915(struct drm_i915_gem_request *request)
+{
+	return request->i915;
+}
+
 struct drm_i915_gem_request * __must_check
 i915_gem_request_alloc(struct intel_engine_cs *engine,
 		       struct intel_context *ctx);
 
@@ -2465,6 +2471,8 @@ struct drm_i915_cmd_table {
 		__p = __obj_to_i915((struct drm_i915_gem_object *)p); \
 	else if (__builtin_types_compatible_p(typeof(*p), struct intel_guc)) \
 		__p = __guc_to_i915((struct intel_guc *)p); \
+	else if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_gem_request)) \
+		__p = __request_to_i915((struct drm_i915_gem_request *)(p)); \
 	else \
 		BUILD_BUG(); \
 	__p; \
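For anyone unfamiliar with the __I915__()/to_i915() idiom this hunk
extends: __builtin_types_compatible_p() is evaluated at compile time,
so the whole if/else chain collapses to a single conversion, and a
pointer type the macro does not recognise fails the build via
BUILD_BUG(). Below is a minimal standalone sketch of the idiom; the
structs and names (device, request, engine, to_dev) are hypothetical
userspace stand-ins, not kernel code:

#include <stdio.h>

/* Illustrative stand-ins for drm_i915_private and friends. */
struct device { const char *name; };
struct request { struct device *dev; };
struct engine { struct device *dev; };

static inline struct device *__request_to_dev(struct request *rq)
{
	return rq->dev;
}

static inline struct device *__engine_to_dev(struct engine *e)
{
	return e->dev;
}

/*
 * Each __builtin_types_compatible_p() is a compile-time constant, so
 * the dead branches are discarded and only one conversion survives;
 * the casts keep the discarded branches type-correct. The kernel
 * macro ends the chain with BUILD_BUG() rather than NULL so that an
 * unknown pointer type is a compile error.
 */
#define to_dev(p) ({ \
	struct device *__d; \
	if (__builtin_types_compatible_p(typeof(*(p)), struct request)) \
		__d = __request_to_dev((struct request *)(p)); \
	else if (__builtin_types_compatible_p(typeof(*(p)), struct engine)) \
		__d = __engine_to_dev((struct engine *)(p)); \
	else \
		__d = NULL; \
	__d; \
})

int main(void)
{
	struct device dev = { "dev0" };
	struct request rq = { &dev };
	struct engine e = { &dev };

	/* Same macro, two different pointer types. */
	printf("%s %s\n", to_dev(&rq)->name, to_dev(&e)->name);
	return 0;
}

Teaching to_i915() about a new type is then just one more else-if
branch, which is exactly what the hunk above adds for requests.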
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 710a6bbc985e..7e98cf884972 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1455,18 +1455,16 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
 int
 i915_wait_request(struct drm_i915_gem_request *req)
 {
-	struct drm_device *dev;
 	struct drm_i915_private *dev_priv;
 	bool interruptible;
 	int ret;
 
 	BUG_ON(req == NULL);
 
-	dev = req->engine->dev;
-	dev_priv = dev->dev_private;
+	dev_priv = to_i915(req);
 	interruptible = dev_priv->mm.interruptible;
 
-	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+	BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
 	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
 	if (ret)
@@ -2674,13 +2672,13 @@ void i915_gem_request_free(struct kref *req_ref)
 		i915_gem_request_remove_from_client(req);
 
 	if (ctx) {
-		if (i915.enable_execlists && ctx != req->i915->kernel_context)
+		if (i915.enable_execlists && ctx != to_i915(req)->kernel_context)
 			intel_lr_context_unpin(ctx, req->engine);
 
 		i915_gem_context_unreference(ctx);
 	}
 
-	kmem_cache_free(req->i915->requests, req);
+	kmem_cache_free(to_i915(req)->requests, req);
 }
 
 static inline int
@@ -4692,12 +4690,10 @@ err:
 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 {
 	struct intel_engine_cs *engine = req->engine;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
+	u32 *remap_info = to_i915(req)->l3_parity.remap_info[slice];
	int i, ret;
 
-	if (!HAS_L3_DPF(dev) || !remap_info)
+	if (!HAS_L3_DPF(req) || !remap_info)
 		return 0;
 
 	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a8afd0cee7f7..ccaa106f6936 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -515,7 +515,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
 		i915_semaphore_is_enabled(engine->dev) ?
-		hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 :
+		hweight32(INTEL_INFO(req)->ring_mask) - 1 :
 		0;
 	int len, i, ret;
 
@@ -524,21 +524,21 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	 * explicitly, so we rely on the value at ring init, stored in
 	 * itlb_before_ctx_switch.
 	 */
-	if (IS_GEN6(engine->dev)) {
+	if (IS_GEN6(req)) {
 		ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
 		if (ret)
 			return ret;
 	}
 
 	/* These flags are for resource streamer on HSW+ */
-	if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8)
+	if (IS_HASWELL(req) || INTEL_INFO(req)->gen >= 8)
 		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
-	else if (INTEL_INFO(engine->dev)->gen < 8)
+	else if (INTEL_INFO(req)->gen < 8)
 		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
 
 	len = 4;
-	if (INTEL_INFO(engine->dev)->gen >= 7)
+	if (INTEL_INFO(req)->gen >= 7)
 		len += 2 + (num_rings ? 4*num_rings + 2 : 0);
 
 	ret = intel_ring_begin(req, len);
@@ -546,14 +546,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 		return ret;
 
 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
-	if (INTEL_INFO(engine->dev)->gen >= 7) {
+	if (INTEL_INFO(req)->gen >= 7) {
 		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 
 			intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(num_rings));
-			for_each_engine(signaller, engine->dev, i) {
+			for_each_engine(signaller, req, i) {
 				if (signaller == engine)
 					continue;
@@ -576,13 +576,13 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	 */
 	intel_ring_emit(engine, MI_NOOP);
 
-	if (INTEL_INFO(engine->dev)->gen >= 7) {
+	if (INTEL_INFO(req)->gen >= 7) {
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 
 			intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(num_rings));
-			for_each_engine(signaller, engine->dev, i) {
+			for_each_engine(signaller, req, i) {
 				if (signaller == engine)
 					continue;
@@ -829,10 +829,9 @@ unpin_out:
 int i915_switch_context(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
-	struct drm_i915_private *dev_priv = req->i915;
 
 	WARN_ON(i915.enable_execlists);
-	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&to_i915(req)->dev->struct_mutex));
 
 	if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
 		if (req->ctx != engine->last_context) {
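The IS_GEN6(req), IS_HASWELL(req) and INTEL_INFO(req) conversions in
the hunks above work because those predicates already resolve their
argument through the same dispatch macro. Roughly, paraphrased and
simplified from the i915_drv.h of this era (not a verbatim copy):

#define INTEL_INFO(p)	(&__I915__(p)->info)
#define IS_GEN6(p)	(INTEL_INFO(p)->gen == 6)
#define HAS_L3_DPF(p)	(IS_IVYBRIDGE(p) || IS_HASWELL(p))

So once __I915__()/to_i915() understands a request, every
INTEL_INFO()-derived predicate accepts one as well, with no further
changes.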
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 39ed403b9de3..28614c4ecbc5 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1096,7 +1096,6 @@ void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, vmas, exec_list) {
@@ -1123,7 +1122,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 			i915_gem_request_assign(&obj->last_fenced_req, req);
 			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-				struct drm_i915_private *dev_priv = to_i915(engine->dev);
+				struct drm_i915_private *dev_priv = to_i915(req);
 				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
 					       &dev_priv->mm.fence_list);
 			}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 6447a5f9661e..8eb64f5ed78c 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2192,8 +2192,7 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
 
 int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
 {
-	struct drm_i915_private *dev_priv = req->i915;
-	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+	struct i915_hw_ppgtt *ppgtt = to_i915(req)->mm.aliasing_ppgtt;
 
 	if (i915.enable_execlists)
 		return 0;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3a23b9549f7b..0789f4581f7d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -361,8 +361,7 @@ static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
 {
 	struct intel_engine_cs *engine = rq0->engine;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(rq0);
 	uint64_t desc[2];
 
 	if (rq1) {
@@ -616,7 +615,7 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
 	struct drm_i915_gem_request *cursor;
 	int num_elements = 0;
 
-	if (request->ctx != request->i915->kernel_context)
+	if (request->ctx != to_i915(request)->kernel_context)
 		intel_lr_context_pin(request->ctx, engine);
 
 	i915_gem_request_reference(request);
@@ -712,14 +711,14 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 		 * going any further, as the i915_add_request() call
 		 * later on mustn't fail ...
 		 */
-		struct intel_guc *guc = &request->i915->guc;
+		struct intel_guc *guc = &to_i915(request)->guc;
 
 		ret = i915_guc_wq_check_space(guc->execbuf_client);
 		if (ret)
 			return ret;
 	}
 
-	if (request->ctx != request->i915->kernel_context)
+	if (request->ctx != to_i915(request)->kernel_context)
 		ret = intel_lr_context_pin(request->ctx, request->engine);
 
 	return ret;
@@ -780,7 +779,7 @@ static int
 intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
 	struct intel_ringbuffer *ringbuf = request->ringbuf;
-	struct drm_i915_private *dev_priv = request->i915;
+	struct drm_i915_private *dev_priv = to_i915(request);
 	struct intel_engine_cs *engine = request->engine;
 
 	intel_logical_ring_advance(ringbuf);
@@ -802,7 +801,7 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 	if (engine->last_context != request->ctx) {
 		if (engine->last_context)
 			intel_lr_context_unpin(engine->last_context, engine);
-		if (request->ctx != request->i915->kernel_context) {
+		if (request->ctx != to_i915(request)->kernel_context) {
 			intel_lr_context_pin(request->ctx, engine);
 			engine->last_context = request->ctx;
 		} else {
@@ -898,7 +897,7 @@ int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 	int ret;
 
 	WARN_ON(req == NULL);
-	dev_priv = req->i915;
+	dev_priv = to_i915(req);
 
 	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
 				   dev_priv->mm.interruptible);
@@ -1042,7 +1041,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *engine)
 		struct drm_i915_gem_object *ctx_obj =
 				ctx->engine[engine->id].state;
 
-		if (ctx_obj && (ctx != req->i915->kernel_context))
+		if (ctx_obj && (ctx != to_i915(req)->kernel_context))
 			intel_lr_context_unpin(ctx, engine);
 
 		list_del(&req->execlist_link);
@@ -1177,8 +1176,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	int ret, i;
 	struct intel_engine_cs *engine = req->engine;
 	struct intel_ringbuffer *ringbuf = req->ringbuf;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(req);
 	struct i915_workarounds *w = &dev_priv->workarounds;
 
 	if (w->count == 0)
@@ -1690,8 +1688,8 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 	 * not needed in 48-bit.*/
 	if (req->ctx->ppgtt &&
 	    (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
-		if (!USES_FULL_48BIT_PPGTT(req->i915) &&
-		    !intel_vgpu_active(req->i915->dev)) {
+		if (!USES_FULL_48BIT_PPGTT(req) &&
+		    !intel_vgpu_active(to_i915(req)->dev)) {
 			ret = intel_logical_ring_emit_pdps(req);
 			if (ret)
 				return ret;
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 45200b93e9bb..9aa3d8750d97 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -327,7 +327,7 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
 		enum intel_engine_id ring_id;
 
 		/* Program the control registers */
-		for_each_engine(engine, req->i915, ring_id) {
+		for_each_engine(engine, req, ring_id) {
 			ret = emit_mocs_control_table(req, &t, ring_id);
 			if (ret)
 				return ret;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 521cf4564329..46e2fdda6101 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -7385,8 +7385,7 @@ static void __intel_rps_boost_work(struct work_struct *work)
 	struct drm_i915_gem_request *req = boost->req;
 
 	if (!i915_gem_request_completed(req, true))
-		gen6_rps_boost(to_i915(req->engine->dev), NULL,
-			       req->emitted_jiffies);
+		gen6_rps_boost(to_i915(req), NULL, req->emitted_jiffies);
 
 	i915_gem_request_unreference__unlocked(req);
 	kfree(boost);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f981bddc9bbf..8a293a25588c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -107,7 +107,6 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 flush_domains)
 {
 	struct intel_engine_cs *engine = req->engine;
-	struct drm_device *dev = engine->dev;
 	u32 cmd;
 	int ret;
 
@@ -146,7 +145,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 		cmd |= MI_EXE_FLUSH;
 
 	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
-	    (IS_G4X(dev) || IS_GEN5(dev)))
+	    (IS_G4X(req) || IS_GEN5(req)))
 		cmd |= MI_INVALIDATE_ISP;
 
 	ret = intel_ring_begin(req, 2);
@@ -705,8 +704,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
 	struct intel_engine_cs *engine = req->engine;
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(req);
 	struct i915_workarounds *w = &dev_priv->workarounds;
 
 	if (w->count == 0)
@@ -1272,12 +1270,11 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 {
 #define MBOX_UPDATE_DWORDS 8
 	struct intel_engine_cs *signaller = signaller_req->engine;
-	struct drm_device *dev = signaller->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(signaller_req);
 	struct intel_engine_cs *waiter;
 	int i, ret, num_rings;
 
-	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
 	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
@@ -1313,12 +1310,11 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 {
 #define MBOX_UPDATE_DWORDS 6
 	struct intel_engine_cs *signaller = signaller_req->engine;
-	struct drm_device *dev = signaller->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(signaller_req);
 	struct intel_engine_cs *waiter;
 	int i, ret, num_rings;
 
-	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
 	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
@@ -1351,13 +1347,12 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 		       unsigned int num_dwords)
 {
 	struct intel_engine_cs *signaller = signaller_req->engine;
-	struct drm_device *dev = signaller->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(signaller_req);
 	struct intel_engine_cs *useless;
 	int i, ret, num_rings;
 
 #define MBOX_UPDATE_DWORDS 3
-	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
 	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
 #undef MBOX_UPDATE_DWORDS
@@ -1437,7 +1432,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       u32 seqno)
 {
 	struct intel_engine_cs *waiter = waiter_req->engine;
-	struct drm_i915_private *dev_priv = waiter->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(waiter_req);
 	int ret;
 
 	ret = intel_ring_begin(waiter_req, 4);
@@ -2372,8 +2367,8 @@ int intel_engine_idle(struct intel_engine_cs *engine)
 
 	/* Make sure we do not trigger any retires */
 	return __i915_wait_request(req,
-				   atomic_read(&to_i915(engine->dev)->gpu_error.reset_counter),
-				   to_i915(engine->dev)->mm.interruptible,
+				   atomic_read(&to_i915(req)->gpu_error.reset_counter),
+				   to_i915(req)->mm.interruptible,
 				   NULL, NULL);
 }
@@ -2501,7 +2496,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req,
 
 	WARN_ON(req == NULL);
 	engine = req->engine;
-	dev_priv = req->i915;
+	dev_priv = to_i915(req);
 
 	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
 				   dev_priv->mm.interruptible);
@@ -2600,7 +2595,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 		return ret;
 
 	cmd = MI_FLUSH_DW;
-	if (INTEL_INFO(engine->dev)->gen >= 8)
+	if (INTEL_INFO(req)->gen >= 8)
 		cmd += 1;
 
 	/* We always require a command barrier so that subsequent
@@ -2622,7 +2617,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 	intel_ring_emit(engine, cmd);
 	intel_ring_emit(engine,
 			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-	if (INTEL_INFO(engine->dev)->gen >= 8) {
+	if (INTEL_INFO(req)->gen >= 8) {
 		intel_ring_emit(engine, 0); /* upper addr */
 		intel_ring_emit(engine, 0); /* value */
 	} else {
@@ -2639,7 +2634,7 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      unsigned dispatch_flags)
 {
 	struct intel_engine_cs *engine = req->engine;
-	bool ppgtt = USES_PPGTT(engine->dev) &&
+	bool ppgtt = USES_PPGTT(req) &&
 		     !(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
@@ -2713,7 +2708,6 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
 			   u32 invalidate, u32 flush)
 {
 	struct intel_engine_cs *engine = req->engine;
-	struct drm_device *dev = engine->dev;
 	uint32_t cmd;
 	int ret;
 
@@ -2722,7 +2716,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
 		return ret;
 
 	cmd = MI_FLUSH_DW;
-	if (INTEL_INFO(dev)->gen >= 8)
+	if (INTEL_INFO(req)->gen >= 8)
 		cmd += 1;
 
 	/* We always require a command barrier so that subsequent
@@ -2743,7 +2737,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
 	intel_ring_emit(engine, cmd);
 	intel_ring_emit(engine,
 			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-	if (INTEL_INFO(dev)->gen >= 8) {
+	if (INTEL_INFO(req)->gen >= 8) {
 		intel_ring_emit(engine, 0); /* upper addr */
 		intel_ring_emit(engine, 0); /* value */
 	} else {
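The net effect at the call sites across this patch, shown
schematically (illustrative lines, not hunks from the diff):

	/* before: detour through the engine and drm_device */
	struct drm_i915_private *dev_priv = req->engine->dev->dev_private;

	/* after: one consistent accessor */
	struct drm_i915_private *dev_priv = to_i915(req);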
-- 
2.8.0.rc3

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx