This patch adds functions to set up WA batch buffers, but they are not yet
enabled in this patch. Some of the WA have to be applied during context save
but before restore, and some at the end of context save/restore but before
executing the instructions in the ring. These WA cannot be applied using the
normal means, so dedicated WA batch buffers are created for this purpose.

Signed-off-by: Namrta <namrta.salonie@xxxxxxxxx>
Signed-off-by: Arun Siluvery <arun.siluvery@xxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_drv.h  |   3 ++
 drivers/gpu/drm/i915/intel_lrc.c | 101 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 104 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 731b5ce..dd4b31d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -814,6 +814,9 @@ struct intel_context {
         /* Execlists */
         bool rcs_initialized;
 
+        struct intel_ringbuffer *indirect_ctx_wa_bb;
+        struct intel_ringbuffer *per_ctx_wa_bb;
+
         struct {
                 struct drm_i915_gem_object *state;
                 struct intel_ringbuffer *ringbuf;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 0413b8f..50e1b37 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1077,6 +1077,107 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
         return 0;
 }
 
+static struct intel_ringbuffer *
+create_wa_bb(struct intel_engine_cs *ring, uint32_t bb_size)
+{
+        struct drm_device *dev = ring->dev;
+        struct intel_ringbuffer *ringbuf;
+        int ret;
+
+        ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
+        if (!ringbuf)
+                return NULL;
+
+        ringbuf->ring = ring;
+        ringbuf->size = roundup(bb_size, PAGE_SIZE);
+        ringbuf->effective_size = ringbuf->size;
+        ringbuf->head = 0;
+        ringbuf->tail = 0;
+        ringbuf->space = ringbuf->size;
+        ringbuf->last_retired_head = -1;
+
+        ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
+        if (ret) {
+                DRM_DEBUG_DRIVER(
+                        "Failed to allocate ringbuf obj for wa_bb %s: %d\n",
+                        ring->name, ret);
+                kfree(ringbuf);
+                return NULL;
+        }
+
+        ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+        if (ret) {
+                DRM_ERROR("Failed to pin and map %s w/a batch: %d\n",
+                          ring->name, ret);
+                intel_destroy_ringbuffer_obj(ringbuf);
+                kfree(ringbuf);
+                return NULL;
+        }
+
+        return ringbuf;
+}
+
+static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
+                                    struct intel_context *ctx)
+{
+        int i;
+        struct intel_ringbuffer *ringbuf = NULL;
+
+        ringbuf = create_wa_bb(ring, PAGE_SIZE);
+        if (!ringbuf)
+                return -ENOMEM;
+
+        ctx->indirect_ctx_wa_bb = ringbuf;
+
+        /* FIXME: fill one cache line with NOOPs.
+         * Replace these instructions with WA
+         */
+        for (i = 0; i < 16; ++i)
+                intel_logical_ring_emit(ringbuf, MI_NOOP);
+
+        /*
+         * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
+         * execution depends on the size defined in CTX_RCS_INDIRECT_CTX
+         */
+
+        return 0;
+}
+
+static int gen8_init_perctx_bb(struct intel_engine_cs *ring,
+                               struct intel_context *ctx)
+{
+        int i;
+        struct intel_ringbuffer *ringbuf = NULL;
+
+        ringbuf = create_wa_bb(ring, PAGE_SIZE);
+        if (!ringbuf)
+                return -ENOMEM;
+
+        ctx->per_ctx_wa_bb = ringbuf;
+
+        /* FIXME: Replace these instructions with WA */
+        for (i = 0; i < 15; ++i)
+                intel_logical_ring_emit(ringbuf, MI_NOOP);
+
+        intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_END);
+
+        return 0;
+}
+
+static int intel_init_workaround_bb(struct intel_engine_cs *ring,
+                                    struct intel_context *ctx)
+{
+        int ret;
+        struct drm_device *dev = ring->dev;
+
+        if (WARN_ON(ring->id != RCS))
+                return -EINVAL;
+
+        /* FIXME: Add Gen specific init functions */
+
+        return 0;
+}
+
 static int gen8_init_common_ring(struct intel_engine_cs *ring)
 {
         struct drm_device *dev = ring->dev;
-- 
2.3.0
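
Note (illustrative only, not part of this patch): once the FIXME in
intel_init_workaround_bb() is resolved, the gen-specific helpers added above
would presumably be dispatched by GPU generation, roughly as sketched below.
The IS_GEN8() check and the error handling here are assumptions made for
illustration, not what the series will necessarily do.

static int intel_init_workaround_bb(struct intel_engine_cs *ring,
                                    struct intel_context *ctx)
{
        int ret;
        struct drm_device *dev = ring->dev;

        /* WA batch buffers are only set up for the render ring */
        if (WARN_ON(ring->id != RCS))
                return -EINVAL;

        if (IS_GEN8(dev)) {
                /* indirect ctx WA batch buffer (NOOP-filled for now) */
                ret = gen8_init_indirectctx_bb(ring, ctx);
                if (ret)
                        return ret;

                /* per-ctx WA batch buffer (NOOP-filled for now) */
                ret = gen8_init_perctx_bb(ring, ctx);
                if (ret)
                        return ret;
        }

        return 0;
}

Presumably this would be called from the RCS context creation path, with the
two WA ringbuffers unpinned and freed again when the context is destroyed.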