From: Oscar Mateo <oscar.mateo@xxxxxxxxx>

As with the context objects, pinning the ringbuffer to the GGTT
unconditionally is harmful: it badly fragments the GGTT and can even
exhaust it.

Unfortunately, this case is also more complex than the previous one,
because we need to map and access the ringbuffer in several places
along the execbuffer path (and we cannot make do by leaving only the
default ringbuffer pinned, as before). Also, the context object itself
contains a pointer to the ringbuffer address, which we have to keep
updated if we are going to allow the ringbuffer to move around.

v2: As with the context pinning, we cannot really do it during an
interrupt. Also, pin the default ringbuffer objects unconditionally
(this makes error capture a lot easier).

Signed-off-by: Oscar Mateo <oscar.mateo@xxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem.c         |  5 +-
 drivers/gpu/drm/i915/intel_lrc.c        | 80 ++++++++++++++++++---------
 drivers/gpu/drm/i915/intel_ringbuffer.c | 83 ++++++++++++++++++-------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |  3 ++
 4 files changed, 111 insertions(+), 60 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 42faaa3..1a852b9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2498,13 +2498,16 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
 
 	if (ctx) {
 		struct intel_engine_cs *ring = request->ring;
+		struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
 		struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
 		atomic_t *unpin_count = &ctx->engine[ring->id].unpin_count;
 
 		if (ctx_obj) {
 			if (atomic_dec_return(unpin_count) == 0 &&
-			    ctx != ring->default_context)
+			    ctx != ring->default_context) {
+				intel_unpin_ringbuffer_obj(ringbuf);
 				i915_gem_object_ggtt_unpin(ctx_obj);
+			}
 		}
 		i915_gem_context_unreference(ctx);
 	}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 9fa8e35..4ca8278 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -315,7 +315,9 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
 }
 
-static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tail)
+static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
+				    struct drm_i915_gem_object *ring_obj,
+				    u32 tail)
 {
 	struct page *page;
 	uint32_t *reg_state;
@@ -324,6 +326,7 @@ static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tai
 	reg_state = kmap_atomic(page);
 
 	reg_state[CTX_RING_TAIL+1] = tail;
+	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
 
 	kunmap_atomic(reg_state);
 
@@ -334,21 +337,25 @@ static int execlists_submit_context(struct intel_engine_cs *ring,
 				    struct intel_context *to0, u32 tail0,
 				    struct intel_context *to1, u32 tail1)
 {
-	struct drm_i915_gem_object *ctx_obj0;
+	struct drm_i915_gem_object *ctx_obj0 = to0->engine[ring->id].state;
+	struct intel_ringbuffer *ringbuf0 = to0->engine[ring->id].ringbuf;
 	struct drm_i915_gem_object *ctx_obj1 = NULL;
+	struct intel_ringbuffer *ringbuf1 = NULL;
 
-	ctx_obj0 = to0->engine[ring->id].state;
 	BUG_ON(!ctx_obj0);
 	BUG_ON(!i915_gem_obj_is_pinned(ctx_obj0));
+	BUG_ON(!i915_gem_obj_is_pinned(ringbuf0->obj));
 
-	execlists_ctx_write_tail(ctx_obj0, tail0);
+	execlists_update_context(ctx_obj0, ringbuf0->obj, tail0);
 
 	if (to1) {
+		ringbuf1 = to1->engine[ring->id].ringbuf;
 		ctx_obj1 = to1->engine[ring->id].state;
 		BUG_ON(!ctx_obj1);
 		BUG_ON(!i915_gem_obj_is_pinned(ctx_obj1));
+		BUG_ON(!i915_gem_obj_is_pinned(ringbuf1->obj));
 
-		execlists_ctx_write_tail(ctx_obj1, tail1);
+		execlists_update_context(ctx_obj1, ringbuf1->obj, tail1);
 	}
 
 	execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
@@ -772,6 +779,7 @@ static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
 
 	if (ring->preallocated_lazy_request == NULL) {
 		struct drm_i915_gem_request *request;
+		struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
 		struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
 		atomic_t *unpin_count = &ctx->engine[ring->id].unpin_count;
 
@@ -787,6 +795,13 @@ static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
 			kfree(request);
 			return ret;
 		}
+
+		ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
+		if (ret) {
+			i915_gem_object_ggtt_unpin(ctx_obj);
+			kfree(request);
+			return ret;
+		}
 	}
 
 	/* Hold a reference to the context this request belongs to
@@ -1546,7 +1561,13 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
 	reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
 	reg_state[CTX_RING_TAIL+1] = 0;
 	reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
+
+	ret = i915_gem_obj_ggtt_pin(ring_obj, PAGE_SIZE, 0);
+	if (ret)
+		goto error;
 	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
+	i915_gem_object_ggtt_unpin(ring_obj);
+
 	reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
 	reg_state[CTX_RING_BUFFER_CONTROL+1] =
 		((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
@@ -1599,13 +1620,14 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
 		reg_state[CTX_R_PWR_CLK_STATE+1] = 0;
 	}
 
+error:
 	kunmap_atomic(reg_state);
 
 	ctx_obj->dirty = 1;
 	set_page_dirty(page);
 	i915_gem_object_unpin_pages(ctx_obj);
 
-	return 0;
+	return ret;
 }
 
 /**
@@ -1627,10 +1649,12 @@ void intel_lr_context_free(struct intel_context *ctx)
 			struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
 			struct intel_engine_cs *ring = ringbuf->ring;
 
+			if (ctx == ring->default_context) {
+				intel_unpin_ringbuffer_obj(ringbuf);
+				i915_gem_object_ggtt_unpin(ctx_obj);
+			}
 			intel_destroy_ringbuffer_obj(ringbuf);
 			kfree(ringbuf);
-			if (ctx == ring->default_context)
-				i915_gem_object_ggtt_unpin(ctx_obj);
 			drm_gem_object_unreference(&ctx_obj->base);
 		}
 	}
@@ -1706,11 +1730,8 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 	if (!ringbuf) {
 		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
 				ring->name);
-		if (is_global_default_ctx)
-			i915_gem_object_ggtt_unpin(ctx_obj);
-		drm_gem_object_unreference(&ctx_obj->base);
 		ret = -ENOMEM;
-		return ret;
+		goto error_unpin_ctx;
 	}
 
 	ringbuf->ring = ring;
@@ -1722,22 +1743,28 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 	ringbuf->space = ringbuf->size;
 	ringbuf->last_retired_head = -1;
 
-	/* TODO: For now we put this in the mappable region so that we can reuse
-	 * the existing ringbuffer code which ioremaps it. When we start
-	 * creating many contexts, this will no longer work and we must switch
-	 * to a kmapish interface.
-	 */
-	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
-	if (ret) {
-		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
-				ring->name, ret);
-		goto error;
+	if (ringbuf->obj == NULL) {
+		ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
+		if (ret) {
+			DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
+					ring->name, ret);
+			goto error_free_rbuf;
+		}
+
+		if (is_global_default_ctx) {
+			ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+			if (ret) {
+				DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
+						ring->name, ret);
+				goto error_destroy_rbuf;
+			}
+		}
+
 	}
 
 	ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
-		intel_destroy_ringbuffer_obj(ringbuf);
 		goto error;
 	}
 
@@ -1753,7 +1780,6 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 			DRM_ERROR("Init render state failed: %d\n", ret);
 			ctx->engine[ring->id].ringbuf = NULL;
 			ctx->engine[ring->id].state = NULL;
-			intel_destroy_ringbuffer_obj(ringbuf);
 			goto error;
 		}
 		ctx->rcs_initialized = true;
@@ -1762,7 +1788,13 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 	return 0;
 
 error:
+	if (is_global_default_ctx)
+		intel_unpin_ringbuffer_obj(ringbuf);
+error_destroy_rbuf:
+	intel_destroy_ringbuffer_obj(ringbuf);
+error_free_rbuf:
 	kfree(ringbuf);
+error_unpin_ctx:
 	if (is_global_default_ctx)
 		i915_gem_object_ggtt_unpin(ctx_obj);
 	drm_gem_object_unreference(&ctx_obj->base);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 6e604c9..020588c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1513,13 +1513,42 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
 	return 0;
 }
 
-void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
-	if (!ringbuf->obj)
-		return;
-
 	iounmap(ringbuf->virtual_start);
+	ringbuf->virtual_start = NULL;
 	i915_gem_object_ggtt_unpin(ringbuf->obj);
+}
+
+int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+				     struct intel_ringbuffer *ringbuf)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_gem_object *obj = ringbuf->obj;
+	int ret;
+
+	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_object_set_to_gtt_domain(obj, true);
+	if (ret) {
+		i915_gem_object_ggtt_unpin(obj);
+		return ret;
+	}
+
+	ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
+			i915_gem_obj_ggtt_offset(obj), ringbuf->size);
+	if (ringbuf->virtual_start == NULL) {
+		i915_gem_object_ggtt_unpin(obj);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+{
 	drm_gem_object_unreference(&ringbuf->obj->base);
 	ringbuf->obj = NULL;
 }
@@ -1527,12 +1556,7 @@ void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 int intel_alloc_ringbuffer_obj(struct drm_device *dev,
 			       struct intel_ringbuffer *ringbuf)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
-	int ret;
-
-	if (ringbuf->obj)
-		return 0;
 
 	obj = NULL;
 	if (!HAS_LLC(dev))
@@ -1545,30 +1569,9 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
 	/* mark ring buffers as read-only from GPU side by default */
 	obj->gt_ro = 1;
 
-	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
-	if (ret)
-		goto err_unref;
-
-	ret = i915_gem_object_set_to_gtt_domain(obj, true);
-	if (ret)
-		goto err_unpin;
-
-	ringbuf->virtual_start =
-		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
-			   ringbuf->size);
-	if (ringbuf->virtual_start == NULL) {
-		ret = -EINVAL;
-		goto err_unpin;
-	}
-
 	ringbuf->obj = obj;
-	return 0;
 
-err_unpin:
-	i915_gem_object_ggtt_unpin(obj);
-err_unref:
-	drm_gem_object_unreference(&obj->base);
-	return ret;
+	return 0;
 }
 
 static int intel_init_ring_buffer(struct drm_device *dev,
@@ -1606,10 +1609,19 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 		goto error;
 	}
 
-	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
-	if (ret) {
-		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
-		goto error;
+	if (ringbuf->obj == NULL) {
+		ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
+		if (ret) {
+			DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
+			goto error;
+		}
+
+		ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+		if (ret) {
+			DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n", ring->name, ret);
+			intel_destroy_ringbuffer_obj(ringbuf);
+			goto error;
+		}
 	}
 
 	/* Workaround an erratum on the i830 which causes a hang if
@@ -1647,6 +1659,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 	intel_stop_ring_buffer(ring);
 	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
+	intel_unpin_ringbuffer_obj(ringbuf);
 	intel_destroy_ringbuffer_obj(ringbuf);
 	ring->preallocated_lazy_request = NULL;
 	ring->outstanding_lazy_seqno = 0;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 905d1ba..81be2db 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -371,6 +371,9 @@ intel_write_status_page(struct intel_engine_cs *ring,
 #define I915_GEM_HWS_SCRATCH_INDEX	0x30
 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
+void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
+int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+				     struct intel_ringbuffer *ringbuf);
 void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
 int intel_alloc_ringbuffer_obj(struct drm_device *dev,
 			       struct intel_ringbuffer *ringbuf);
-- 
1.7.9.5
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
http://lists.freedesktop.org/mailman/listinfo/intel-gfx
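
As a summary of the new lifetime rules, the caller-side pairing of the
helpers looks roughly as follows. This is a minimal sketch, not code
from the patch: the function name is hypothetical and
GEN8_LR_CONTEXT_ALIGN is assumed from the surrounding intel_lrc.c code;
the helpers, the pin ordering and the unwind ordering are the ones used
in logical_ring_alloc_seqno() above.

/*
 * Hypothetical illustration only: how a caller on the execbuffer path
 * pairs the new helpers. The context backing object is pinned to the
 * GGTT first, then the ringbuffer object is pinned and ioremapped; on
 * failure we unwind in reverse order. The matching unpins live in
 * i915_gem_free_request(): intel_unpin_ringbuffer_obj() followed by
 * i915_gem_object_ggtt_unpin(), once unpin_count drops to zero.
 */
static int example_pin_ctx_and_ringbuf(struct intel_context *ctx,
				       struct intel_engine_cs *ring)
{
	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
	int ret;

	/* GEN8_LR_CONTEXT_ALIGN is assumed from intel_lrc.c */
	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
	if (ret)
		return ret;

	/* Pins to the mappable region and sets ringbuf->virtual_start */
	ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
	if (ret) {
		i915_gem_object_ggtt_unpin(ctx_obj);
		return ret;
	}

	return 0;
}

Keeping the ringbuffer pinned only between these two points is what
lets execlists_update_context() rewrite CTX_RING_BUFFER_START with the
(possibly new) GGTT offset on every submission.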