Previously, the PDPs inside the ring context would be updated: - When populating an LRC context - Before submitting an LRC context (only for 32-bit PPGTT, as the number of PDPs in use may change while the PPGTT page tables grow) Under GVT-g, each VM owns a GVT context used as the shadow context. When a guest submits a context, GVT-g will load the guest context into the GVT context, and submit this context to the i915 GEM submission system. So this GVT context could be used by different guest contexts, and the PPGTT root pointer in those guest contexts might differ as well, if the guest is using full PPGTT mode. In the current i915, the root pointer in an LRC context is only updated during LRC context creation. This patch postpones the root pointer update to submission time, which gives GVT-g a chance to load a new PPGTT page table root pointer into an existing GVT context. Signed-off-by: Zhi Wang <zhi.a.wang@xxxxxxxxx> --- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/intel_lrc.c | 54 +++++++++++++++++++--------------------- 2 files changed, 27 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 972b4ce..1281bbf 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -891,6 +891,7 @@ struct intel_context { struct i915_vma *lrc_vma; u64 lrc_desc; uint32_t *lrc_reg_state; + bool root_pointer_dirty; } engine[I915_NUM_RINGS]; struct list_head link; diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 01ea99c2..19c6b46 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -391,22 +391,40 @@ static int execlists_update_context(struct drm_i915_gem_request *rq) { struct intel_engine_cs *ring = rq->ring; struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt; + struct drm_i915_private *dev_priv = to_i915(ring->dev); + uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state; reg_state[CTX_RING_TAIL+1] = 
rq->tail; - if (ppgtt && !IS_48BIT_PPGTT(ppgtt)) { - /* True 32b PPGTT with dynamic page allocation: update PDP - * registers and point the unallocated PDPs to scratch page. - * PML4 is allocated during ppgtt init, so this is not needed - * in 48-bit mode. + if (!ppgtt) + ppgtt = dev_priv->mm.aliasing_ppgtt; + + /* Update root pointers for 32b PPGTT in every submission */ + if (!rq->ctx->engine[ring->id].root_pointer_dirty + && !IS_48BIT_PPGTT(ppgtt)) + return 0; + + if (!IS_48BIT_PPGTT(ppgtt)) { + /* 32b PPGTT + * PDP*_DESCRIPTOR contains the base address of space + * supported. With dynamic page allocation, PDPs may + * not be allocated at this point. Point the unallocated + * PDPs to the scratch page */ ASSIGN_CTX_PDP(ppgtt, reg_state, 3); ASSIGN_CTX_PDP(ppgtt, reg_state, 2); ASSIGN_CTX_PDP(ppgtt, reg_state, 1); ASSIGN_CTX_PDP(ppgtt, reg_state, 0); + } else { + /* 64b PPGTT (48bit canonical) + * PDP0_DESCRIPTOR contains the base address to PML4 + * and other PDP Descriptors are ignored. 
+ */ + ASSIGN_CTX_PML4(ppgtt, reg_state); } + rq->ctx->engine[ring->id].root_pointer_dirty = false; return 0; } @@ -2322,15 +2340,10 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf) { struct drm_device *dev = ring->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; struct page *page; uint32_t *reg_state; int ret; - if (!ppgtt) - ppgtt = dev_priv->mm.aliasing_ppgtt; - ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true); if (ret) { DRM_DEBUG_DRIVER("Could not set to CPU domain\n"); @@ -2408,24 +2421,6 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(ring, 0), 0); ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(ring, 0), 0); - if (IS_48BIT_PPGTT(ppgtt)) { - /* 64b PPGTT (48bit canonical) - * PDP0_DESCRIPTOR contains the base address to PML4 and - * other PDP Descriptors are ignored. - */ - ASSIGN_CTX_PML4(ppgtt, reg_state); - } else { - /* 32b PPGTT - * PDP*_DESCRIPTOR contains the base address of space supported. - * With dynamic page allocation, PDPs may not be allocated at - * this point. 
Point the unallocated PDPs to the scratch page - */ - ASSIGN_CTX_PDP(ppgtt, reg_state, 3); - ASSIGN_CTX_PDP(ppgtt, reg_state, 2); - ASSIGN_CTX_PDP(ppgtt, reg_state, 1); - ASSIGN_CTX_PDP(ppgtt, reg_state, 0); - } - if (ring->id == RCS) { reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, @@ -2435,6 +2430,9 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o kunmap_atomic(reg_state); i915_gem_object_unpin_pages(ctx_obj); + /* PDPs inside the ring context need to be updated */ + ctx->engine[ring->id].root_pointer_dirty = true; + return 0; } -- 1.9.1 _______________________________________________ Intel-gfx mailing list Intel-gfx@xxxxxxxxxxxxxxxxxxxxx https://lists.freedesktop.org/mailman/listinfo/intel-gfx