In legacy ringbuffer mode, the HWSP is a separate GEM object with its
own pinning and reference counts. In LRC mode, however, it's not;
instead, it's part of the default context object. The LRC-mode setup &
teardown code therefore needs to handle this specially; the presence
of the two bugs fixed in this patchset suggests that this code is not
well-understood or maintained at present.

So, this patch moves the (newly-fixed!) LRC-mode HWSP teardown code
into its own (trivial) function, lrc_teardown_hardware_status_page(),
and changes the call signature of lrc_setup_hardware_status_page() to
match, so that all knowledge of this special arrangement is local to
these two functions.

v3: Rebased

Signed-off-by: Dave Gordon <david.s.gordon@xxxxxxxxx>
Reviewed-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/intel_lrc.c | 41 ++++++++++++++++++++++++-----------------
 1 file changed, 24 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index c551c97..148d291 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -227,9 +227,8 @@ enum {
 
 static int intel_lr_context_pin(struct intel_context *ctx,
 				struct intel_engine_cs *engine);
-static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
-		struct drm_i915_gem_object *default_ctx_obj);
-
+static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring);
+static void lrc_teardown_hardware_status_page(struct intel_engine_cs *ring);
 
 /**
  * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
@@ -1555,8 +1554,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u8 next_context_status_buffer_hw;
 
-	lrc_setup_hardware_status_page(ring,
-			dev_priv->kernel_context->engine[ring->id].state);
+	lrc_setup_hardware_status_page(ring);
 
 	I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
 	I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
@@ -2011,10 +2009,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
 	i915_cmd_parser_fini_ring(ring);
 	i915_gem_batch_pool_fini(&ring->batch_pool);
 
-	if (ring->status_page.obj) {
-		kunmap(kmap_to_page(ring->status_page.page_addr));
-		ring->status_page.obj = NULL;
-	}
+	lrc_teardown_hardware_status_page(ring);
 
 	ring->disable_lite_restore_wa = false;
 	ring->ctx_desc_template = 0;
@@ -2506,24 +2501,36 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *ring)
 	return ret;
 }
 
-static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
-		struct drm_i915_gem_object *default_ctx_obj)
+static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(ring->dev);
+	struct intel_context *dctx = dev_priv->kernel_context;
+	struct drm_i915_gem_object *dctx_obj = dctx->engine[ring->id].state;
+	u64 dctx_addr = i915_gem_obj_ggtt_offset(dctx_obj);
 	struct page *page;
 
-	/* The HWSP is part of the default context object in LRC mode. */
-	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj)
-			+ LRC_PPHWSP_PN * PAGE_SIZE;
-	page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN);
+	/*
+	 * The HWSP is part of the default context object in LRC mode.
+	 * Note that it doesn't contribute to the refcount!
+	 */
+	page = i915_gem_object_get_page(dctx_obj, LRC_PPHWSP_PN);
 	ring->status_page.page_addr = kmap(page);
-	ring->status_page.obj = default_ctx_obj;
+	ring->status_page.gfx_addr = dctx_addr + LRC_PPHWSP_PN * PAGE_SIZE;
+	ring->status_page.obj = dctx_obj;
 
 	I915_WRITE(RING_HWS_PGA(ring->mmio_base),
 		   (u32)ring->status_page.gfx_addr);
 	POSTING_READ(RING_HWS_PGA(ring->mmio_base));
 }
 
+static void lrc_teardown_hardware_status_page(struct intel_engine_cs *ring)
+{
+	if (ring->status_page.obj) {
+		kunmap(kmap_to_page(ring->status_page.page_addr));
+		ring->status_page.obj = NULL;
+	}
+}
+
 /**
  * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
  * @ctx: LR context to create.
-- 
1.9.1
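For readers less familiar with this corner of the driver, the pattern the
patch applies -- a setup function paired with a matching teardown, so that
only those two functions know the status page borrows its storage from
another object rather than owning it -- can be sketched in isolation. The
following is a minimal standalone illustration with invented names
(struct engine, engine_setup_hwsp, engine_teardown_hwsp, default_ctx); it
is NOT i915 code:

/*
 * Standalone illustration only -- not i915 code. All names here are
 * invented for this example.
 */
#include <stdio.h>
#include <string.h>

#define HWSP_SIZE 4096

/* Stand-in for the default context object that really owns the page. */
static char default_ctx[HWSP_SIZE];

struct engine {
	void *status_page;	/* borrowed from default_ctx, never owned */
};

/*
 * Setup: together with the teardown below, this is the only place that
 * knows the status page is carved out of someone else's storage, so no
 * allocation or reference counting happens here.
 */
static void engine_setup_hwsp(struct engine *e)
{
	e->status_page = default_ctx;
	memset(e->status_page, 0, HWSP_SIZE);
}

/*
 * Teardown: must mirror setup exactly -- nothing was allocated or
 * referenced, so it only forgets the borrowed pointer.
 */
static void engine_teardown_hwsp(struct engine *e)
{
	e->status_page = NULL;
}

int main(void)
{
	struct engine e;

	engine_setup_hwsp(&e);
	printf("status page mapped at %p\n", e.status_page);
	engine_teardown_hwsp(&e);
	return 0;
}

The point is that teardown has to mirror setup: because setup never took a
reference or allocated anything, teardown must only unmap and forget the
pointer, which is what lrc_teardown_hardware_status_page() now does in one
place.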