Now that we have disambiguated ring and engine, we can use the clearer and more consistent name for the intel_ringbuffer pointer in the request. Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx> --- drivers/gpu/drm/i915/i915_gem.c | 8 +- drivers/gpu/drm/i915/i915_gem_context.c | 2 +- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 4 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 6 +- drivers/gpu/drm/i915/i915_gem_request.c | 20 ++-- drivers/gpu/drm/i915/i915_gem_request.h | 2 +- drivers/gpu/drm/i915/i915_gpu_error.c | 31 +++--- drivers/gpu/drm/i915/i915_guc_submission.c | 4 +- drivers/gpu/drm/i915/intel_display.c | 10 +- drivers/gpu/drm/i915/intel_lrc.c | 152 ++++++++++++++--------------- drivers/gpu/drm/i915/intel_mocs.c | 34 +++---- drivers/gpu/drm/i915/intel_overlay.c | 42 ++++---- drivers/gpu/drm/i915/intel_ringbuffer.c | 86 ++++++++-------- 13 files changed, 198 insertions(+), 203 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6622c9bb3af8..430c439ece26 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4083,11 +4083,11 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice) * at initialization time. */ for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) { - intel_ring_emit(req->ringbuf, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit_reg(req->ringbuf, GEN7_L3LOG(slice, i)); - intel_ring_emit(req->ringbuf, remap_info[i]); + intel_ring_emit(req->ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit_reg(req->ring, GEN7_L3LOG(slice, i)); + intel_ring_emit(req->ring, remap_info[i]); } - intel_ring_advance(req->ringbuf); + intel_ring_advance(req->ring); return ret; } diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index dece033cf604..5b4e77a80c19 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -519,7 +519,7 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id) static inline int mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; u32 flags = hw_flags | MI_MM_SPACE_GTT; const int num_rings = /* Use an extended w/a on ivb+ if signalling from other rings */ diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index e7df91f9a51f..a0f5a997c2f2 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1148,7 +1148,7 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params) static int i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; int ret, i; if (!IS_GEN7(req->i915) || req->engine->id != RCS) { @@ -1229,7 +1229,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, struct drm_i915_gem_execbuffer2 *args, struct list_head *vmas) { - struct intel_ringbuffer *ring = params->request->ringbuf; + struct intel_ringbuffer *ring = params->request->ring; struct drm_i915_private *dev_priv = params->request->i915; u64 exec_start, exec_len; int instp_mode; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index cb7cb59d4c4a..38c109cda904 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -656,7 +656,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req, unsigned entry, dma_addr_t
addr) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; int ret; BUG_ON(entry >= 4); @@ -1648,7 +1648,7 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt) static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_request *req) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; int ret; /* NB: TLBs must be flushed and invalidated before a switch */ @@ -1686,7 +1686,7 @@ static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt, static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_request *req) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; int ret; /* NB: TLBs must be flushed and invalidated before a switch */ diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 8adf2c134048..4cc64d9cca12 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -255,7 +255,7 @@ int i915_gem_request_alloc(struct intel_engine_cs *engine, * to be redone if the request is not actually submitted straight * away, e.g. because a GPU scheduler has deferred it. */ - intel_ring_reserved_space_reserve(req->ringbuf, + intel_ring_reserved_space_reserve(req->ring, MIN_SPACE_FOR_ADD_REQUEST); ret = intel_ring_begin(req, 0); if (ret) { @@ -328,7 +328,7 @@ static void __i915_gem_request_release(struct drm_i915_gem_request *request) void i915_gem_request_cancel(struct drm_i915_gem_request *req) { - intel_ring_reserved_space_cancel(req->ringbuf); + intel_ring_reserved_space_cancel(req->ring); if (i915.enable_execlists) { if (req->ctx != req->engine->default_context) intel_lr_context_unpin(req); @@ -349,7 +349,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request) * Note this requires that we are always called in request * completion order. */ - request->ringbuf->last_retired_head = request->postfix; + request->ring->last_retired_head = request->postfix; __i915_gem_request_release(request); } @@ -401,23 +401,23 @@ void __i915_add_request(struct drm_i915_gem_request *request, struct drm_i915_gem_object *obj, bool flush_caches) { - struct intel_ringbuffer *ringbuf; + struct intel_ringbuffer *ring; u32 request_start; int ret; if (WARN_ON(request == NULL)) return; - ringbuf = request->ringbuf; + ring = request->ring; /* * To ensure that this call will not fail, space for its emissions * should already have been reserved in the ring buffer. Let the ring * know that it is time to use that space up. */ - intel_ring_reserved_space_use(ringbuf); + intel_ring_reserved_space_use(ring); - request_start = intel_ring_get_tail(ringbuf); + request_start = intel_ring_get_tail(ring); /* * Emit any outstanding flushes - execbuf can fail to emit the flush * after having emitted the batchbuffer command. Hence we need to fix @@ -439,14 +439,14 @@ void __i915_add_request(struct drm_i915_gem_request *request, * GPU processing the request, we never over-estimate the * position of the head. */ - request->postfix = intel_ring_get_tail(ringbuf); + request->postfix = intel_ring_get_tail(ring); if (i915.enable_execlists) ret = request->engine->emit_request(request); else { ret = request->engine->add_request(request); - request->tail = intel_ring_get_tail(ringbuf); + request->tail = intel_ring_get_tail(ring); } /* Not allowed to fail! 
*/ WARN(ret, "emit|add_request failed: %d!\n", ret); @@ -471,7 +471,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, i915_gem_mark_busy(request->i915); /* Sanity check that the reserved size was large enough. */ - intel_ring_reserved_space_end(ringbuf); + intel_ring_reserved_space_end(ring); } diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h index 802862e5007d..bd17e3a9a71d 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.h +++ b/drivers/gpu/drm/i915/i915_gem_request.h @@ -79,7 +79,7 @@ struct drm_i915_gem_request { * context. */ struct intel_context *ctx; - struct intel_ringbuffer *ringbuf; + struct intel_ringbuffer *ring; /** Batch buffer related to this request if any (used for error state dump only) */ diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 5bf208d8009e..b47ca1b7041f 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -993,21 +993,21 @@ static void i915_gem_record_rings(struct drm_device *dev, int i, count; for (i = 0; i < I915_NUM_RINGS; i++) { - struct intel_engine_cs *ring = &dev_priv->ring[i]; + struct intel_engine_cs *engine = &dev_priv->ring[i]; error->ring[i].pid = -1; - if (ring->dev == NULL) + if (engine->dev == NULL) continue; error->ring[i].valid = true; - i915_record_ring_state(dev, error, ring, &error->ring[i]); + i915_record_ring_state(dev, error, engine, &error->ring[i]); - request = i915_gem_find_active_request(ring); + request = i915_gem_find_active_request(engine); if (request) { struct i915_address_space *vm; - struct intel_ringbuffer *rb; + struct intel_ringbuffer *ring; vm = request->ctx && request->ctx->ppgtt ? &request->ctx->ppgtt->base : @@ -1022,10 +1022,10 @@ static void i915_gem_record_rings(struct drm_device *dev, request->batch_obj, vm); - if (HAS_BROKEN_CS_TLB(dev_priv->dev)) + if (HAS_BROKEN_CS_TLB(dev_priv)) error->ring[i].wa_batchbuffer = i915_error_ggtt_object_create(dev_priv, - ring->scratch.obj); + engine->scratch.obj); if (request->pid) { struct task_struct *task; @@ -1041,21 +1041,22 @@ static void i915_gem_record_rings(struct drm_device *dev, error->simulated |= request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE; - rb = request->ringbuf; - error->ring[i].cpu_ring_head = rb->head; - error->ring[i].cpu_ring_tail = rb->tail; + ring = request->ring; + error->ring[i].cpu_ring_head = ring->head; + error->ring[i].cpu_ring_tail = ring->tail; error->ring[i].ringbuffer = i915_error_ggtt_object_create(dev_priv, - rb->obj); + ring->obj); } error->ring[i].hws_page = - i915_error_ggtt_object_create(dev_priv, ring->status_page.obj); + i915_error_ggtt_object_create(dev_priv, + engine->status_page.obj); - i915_gem_record_active_context(ring, error, &error->ring[i]); + i915_gem_record_active_context(engine, error, &error->ring[i]); count = 0; - list_for_each_entry(request, &ring->request_list, list) + list_for_each_entry(request, &engine->request_list, list) count++; error->ring[i].num_requests = count; @@ -1068,7 +1069,7 @@ static void i915_gem_record_rings(struct drm_device *dev, } count = 0; - list_for_each_entry(request, &ring->request_list, list) { + list_for_each_entry(request, &engine->request_list, list) { struct drm_i915_error_request *erq; if (count >= error->ring[i].num_requests) { diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index eaf680ce5c9c..e82cc9182dfa 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ 
b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -551,7 +551,7 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc, wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, rq->engine); /* The GuC firmware wants the tail index in QWords, not bytes */ - tail = rq->ringbuf->tail >> 3; + tail = rq->ring->tail >> 3; wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT; wqi->fence_id = 0; /*XXX: what fence to be here */ @@ -567,7 +567,7 @@ static void lr_context_update(struct drm_i915_gem_request *rq) { enum intel_ring_id ring_id = rq->engine->id; struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring_id].state; - struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj; + struct drm_i915_gem_object *rb_obj = rq->ring->obj; struct page *page; uint32_t *reg_state; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 323b0d905c89..0d42356f15b4 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -11052,7 +11052,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev, struct drm_i915_gem_request *req, uint32_t flags) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); u32 flip_mask; int ret; @@ -11087,7 +11087,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev, struct drm_i915_gem_request *req, uint32_t flags) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); u32 flip_mask; int ret; @@ -11119,7 +11119,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev, struct drm_i915_gem_request *req, uint32_t flags) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; struct drm_i915_private *dev_priv = req->i915; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t pf, pipesrc; @@ -11158,7 +11158,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, struct drm_i915_gem_request *req, uint32_t flags) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; struct drm_i915_private *dev_priv = req->i915; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t pf, pipesrc; @@ -11194,7 +11194,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, struct drm_i915_gem_request *req, uint32_t flags) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t plane_bit = 0; int len, ret; diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 1b70a76df31d..87d325b6e7dc 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -360,7 +360,7 @@ static int execlists_update_context(struct drm_i915_gem_request *rq) { struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt; struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[rq->engine->id].state; - struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj; + struct drm_i915_gem_object *rb_obj = rq->ring->obj; struct page *page; uint32_t *reg_state; @@ -671,7 +671,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request { int ret; - request->ringbuf = request->ctx->engine[request->engine->id].ringbuf; + request->ring = request->ctx->engine[request->engine->id].ringbuf; if (request->ctx != request->engine->default_context) { ret = intel_lr_context_pin(request); @@ -709,8 +709,8 @@ 
intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) { struct drm_i915_private *dev_priv = request->i915; - intel_ring_advance(request->ringbuf); - request->tail = request->ringbuf->tail; + intel_ring_advance(request->ring); + request->tail = request->ring->tail; if (dev_priv->guc.execbuf_client) i915_guc_submit(dev_priv->guc.execbuf_client, request); @@ -740,9 +740,9 @@ int intel_execlists_submission(struct i915_execbuffer_params *params, struct list_head *vmas) { struct drm_device *dev = params->dev; - struct intel_engine_cs *ring = params->ring; + struct intel_engine_cs *engine = params->ring; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ringbuffer *ringbuf = params->ctx->engine[ring->id].ringbuf; + struct intel_ringbuffer *ring = params->request->ring; u64 exec_start; int instp_mode; u32 instp_mask; @@ -754,7 +754,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params, case I915_EXEC_CONSTANTS_REL_GENERAL: case I915_EXEC_CONSTANTS_ABSOLUTE: case I915_EXEC_CONSTANTS_REL_SURFACE: - if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) { + if (instp_mode != 0 && engine->id != RCS) { DRM_DEBUG("non-0 rel constants mode on non-RCS\n"); return -EINVAL; } @@ -783,17 +783,17 @@ int intel_execlists_submission(struct i915_execbuffer_params *params, if (ret) return ret; - if (ring == &dev_priv->ring[RCS] && + if (engine->id == RCS && instp_mode != dev_priv->relative_constants_mode) { ret = intel_ring_begin(params->request, 4); if (ret) return ret; - intel_ring_emit(ringbuf, MI_NOOP); - intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit_reg(ringbuf, INSTPM); - intel_ring_emit(ringbuf, instp_mask << 16 | instp_mode); - intel_ring_advance(ringbuf); + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit_reg(ring, INSTPM); + intel_ring_emit(ring, instp_mask << 16 | instp_mode); + intel_ring_advance(ring); dev_priv->relative_constants_mode = instp_mode; } @@ -801,7 +801,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params, exec_start = params->batch_obj_vm_offset + args->batch_start_offset; - ret = ring->emit_bb_start(params->request, exec_start, params->dispatch_flags); + ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags); if (ret) return ret; @@ -880,13 +880,12 @@ static int intel_lr_context_do_pin(struct intel_engine_cs *ring, struct drm_i915_gem_object *ctx_obj, struct intel_ringbuffer *ringbuf) { - struct drm_device *dev = ring->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = ring->i915; int ret = 0; WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, - PIN_OFFSET_BIAS | GUC_WOPCM_TOP); + PIN_OFFSET_BIAS | GUC_WOPCM_TOP); if (ret) return ret; @@ -918,7 +917,7 @@ static int intel_lr_context_pin(struct drm_i915_gem_request *rq) ret = intel_lr_context_do_pin(rq->engine, rq->ctx->engine[engine].state, - rq->ringbuf); + rq->ring); if (ret) { rq->ctx->engine[engine].pin_count = 0; return ret; @@ -932,12 +931,12 @@ void intel_lr_context_unpin(struct drm_i915_gem_request *rq) { int engine = rq->engine->id; struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[engine].state; - struct intel_ringbuffer *ringbuf = rq->ringbuf; + struct intel_ringbuffer *ring = rq->ring; if (ctx_obj) { WARN_ON(!mutex_is_locked(&rq->i915->dev->struct_mutex)); if (--rq->ctx->engine[engine].pin_count == 0) { - 
intel_unpin_ringbuffer_obj(ringbuf); + intel_unpin_ringbuffer_obj(ring); i915_gem_object_ggtt_unpin(ctx_obj); i915_gem_context_unreference(rq->ctx); } @@ -948,7 +947,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) { int ret, i; struct intel_engine_cs *engine = req->engine; - struct intel_ringbuffer *ringbuf = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; struct drm_i915_private *dev_priv = req->i915; struct i915_workarounds *w = &dev_priv->workarounds; @@ -964,14 +963,14 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) if (ret) return ret; - intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count)); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count)); for (i = 0; i < w->count; i++) { - intel_ring_emit_reg(ringbuf, w->reg[i].addr); - intel_ring_emit(ringbuf, w->reg[i].value); + intel_ring_emit_reg(ring, w->reg[i].addr); + intel_ring_emit(ring, w->reg[i].value); } - intel_ring_emit(ringbuf, MI_NOOP); + intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ringbuf); + intel_ring_advance(ring); engine->gpu_caches_dirty = true; ret = logical_ring_flush_all_caches(req); @@ -1418,7 +1417,7 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) { struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt; struct intel_engine_cs *engine = req->engine; - struct intel_ringbuffer *ringbuf = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; const int num_lri_cmds = GEN8_LEGACY_PDPES * 2; int i, ret; @@ -1426,18 +1425,18 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) if (ret) return ret; - intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds)); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds)); for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) { const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); - intel_ring_emit_reg(ringbuf, GEN8_RING_PDP_UDW(engine, i)); - intel_ring_emit(ringbuf, upper_32_bits(pd_daddr)); - intel_ring_emit_reg(ringbuf, GEN8_RING_PDP_LDW(engine, i)); - intel_ring_emit(ringbuf, lower_32_bits(pd_daddr)); + intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, i)); + intel_ring_emit(ring, upper_32_bits(pd_daddr)); + intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, i)); + intel_ring_emit(ring, lower_32_bits(pd_daddr)); } - intel_ring_emit(ringbuf, MI_NOOP); - intel_ring_advance(ringbuf); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return 0; } @@ -1445,7 +1444,7 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) static int gen8_emit_bb_start(struct drm_i915_gem_request *req, u64 offset, unsigned dispatch_flags) { - struct intel_ringbuffer *ringbuf = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE); int ret; @@ -1472,14 +1471,14 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req, return ret; /* FIXME(BDW): Address space and security selectors. */ - intel_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | + intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) | (dispatch_flags & I915_DISPATCH_RS ? 
MI_BATCH_RESOURCE_STREAMER : 0)); - intel_ring_emit(ringbuf, lower_32_bits(offset)); - intel_ring_emit(ringbuf, upper_32_bits(offset)); - intel_ring_emit(ringbuf, MI_NOOP); - intel_ring_advance(ringbuf); + intel_ring_emit(ring, lower_32_bits(offset)); + intel_ring_emit(ring, upper_32_bits(offset)); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return 0; } @@ -1504,10 +1503,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 invalidate_domains, u32 unused) { - struct intel_ringbuffer *ringbuf = request->ringbuf; - struct intel_engine_cs *ring = ringbuf->ring; - struct drm_device *dev = ring->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ringbuffer *ring = request->ring; uint32_t cmd; int ret; @@ -1526,17 +1522,17 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, if (invalidate_domains & I915_GEM_GPU_DOMAINS) { cmd |= MI_INVALIDATE_TLB; - if (ring == &dev_priv->ring[VCS]) + if (request->engine->id == VCS) cmd |= MI_INVALIDATE_BSD; } - intel_ring_emit(ringbuf, cmd); - intel_ring_emit(ringbuf, + intel_ring_emit(ring, cmd); + intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); - intel_ring_emit(ringbuf, 0); /* upper addr */ - intel_ring_emit(ringbuf, 0); /* value */ - intel_ring_advance(ringbuf); + intel_ring_emit(ring, 0); /* upper addr */ + intel_ring_emit(ring, 0); /* value */ + intel_ring_advance(ring); return 0; } @@ -1545,9 +1541,8 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, u32 invalidate_domains, u32 flush_domains) { - struct intel_ringbuffer *ringbuf = request->ringbuf; - struct intel_engine_cs *ring = ringbuf->ring; - u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; + struct intel_ringbuffer *ring = request->ring; + u32 scratch_addr = request->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; bool vf_flush_wa = false; u32 flags = 0; int ret; @@ -1574,7 +1569,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL * pipe control. 
*/ - if (IS_GEN9(ring->dev)) + if (IS_GEN9(request->i915)) vf_flush_wa = true; } @@ -1583,21 +1578,21 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, return ret; if (vf_flush_wa) { - intel_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); - intel_ring_emit(ringbuf, 0); - intel_ring_emit(ringbuf, 0); - intel_ring_emit(ringbuf, 0); - intel_ring_emit(ringbuf, 0); - intel_ring_emit(ringbuf, 0); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); } - intel_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); - intel_ring_emit(ringbuf, flags); - intel_ring_emit(ringbuf, scratch_addr); - intel_ring_emit(ringbuf, 0); - intel_ring_emit(ringbuf, 0); - intel_ring_emit(ringbuf, 0); - intel_ring_advance(ringbuf); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(ring, flags); + intel_ring_emit(ring, scratch_addr); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); return 0; } @@ -1625,8 +1620,7 @@ gen6_seqno_barrier(struct intel_engine_cs *ring) static int gen8_emit_request(struct drm_i915_gem_request *request) { - struct intel_ringbuffer *ringbuf = request->ringbuf; - struct intel_engine_cs *ring = ringbuf->ring; + struct intel_ringbuffer *ring = request->ring; u32 cmd; int ret; @@ -1642,23 +1636,23 @@ static int gen8_emit_request(struct drm_i915_gem_request *request) cmd = MI_STORE_DWORD_IMM_GEN4; cmd |= MI_GLOBAL_GTT; - intel_ring_emit(ringbuf, cmd); - intel_ring_emit(ringbuf, - (ring->status_page.gfx_addr + + intel_ring_emit(ring, cmd); + intel_ring_emit(ring, + (request->engine->status_page.gfx_addr + (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT))); - intel_ring_emit(ringbuf, 0); - intel_ring_emit(ringbuf, request->fence.seqno); - intel_ring_emit(ringbuf, MI_USER_INTERRUPT); - intel_ring_emit(ringbuf, MI_NOOP); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, request->fence.seqno); + intel_ring_emit(ring, MI_USER_INTERRUPT); + intel_ring_emit(ring, MI_NOOP); intel_logical_ring_advance_and_submit(request); /* * Here we add two extra NOOPs as padding to avoid * lite restore of a context with HEAD==TAIL. 
*/ - intel_ring_emit(ringbuf, MI_NOOP); - intel_ring_emit(ringbuf, MI_NOOP); - intel_ring_advance(ringbuf); + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return 0; } diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c index 40041bebc3dc..039c7405f640 100644 --- a/drivers/gpu/drm/i915/intel_mocs.c +++ b/drivers/gpu/drm/i915/intel_mocs.c @@ -191,9 +191,9 @@ static i915_reg_t mocs_register(enum intel_ring_id ring, int index) */ static int emit_mocs_control_table(struct drm_i915_gem_request *req, const struct drm_i915_mocs_table *table, - enum intel_ring_id ring) + enum intel_ring_id id) { - struct intel_ringbuffer *ringbuf = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; unsigned int index; int ret; @@ -204,11 +204,11 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req, if (ret) return ret; - intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES)); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES)); for (index = 0; index < table->size; index++) { - intel_ring_emit_reg(ringbuf, mocs_register(ring, index)); - intel_ring_emit(ringbuf, table->table[index].control_value); + intel_ring_emit_reg(ring, mocs_register(id, index)); + intel_ring_emit(ring, table->table[index].control_value); } /* @@ -220,12 +220,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req, * that value to all the used entries. */ for (; index < GEN9_NUM_MOCS_ENTRIES; index++) { - intel_ring_emit_reg(ringbuf, mocs_register(ring, index)); - intel_ring_emit(ringbuf, table->table[0].control_value); + intel_ring_emit_reg(ring, mocs_register(id, index)); + intel_ring_emit(ring, table->table[0].control_value); } - intel_ring_emit(ringbuf, MI_NOOP); - intel_ring_advance(ringbuf); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return 0; } @@ -244,7 +244,7 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req, static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req, const struct drm_i915_mocs_table *table) { - struct intel_ringbuffer *ringbuf = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; unsigned int count; unsigned int i; u32 value; @@ -259,15 +259,15 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req, if (ret) return ret; - intel_ring_emit(ringbuf, + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2)); for (i = 0, count = 0; i < table->size / 2; i++, count += 2) { value = (table->table[count].l3cc_value & 0xffff) | ((table->table[count + 1].l3cc_value & 0xffff) << 16); - intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i)); - intel_ring_emit(ringbuf, value); + intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i)); + intel_ring_emit(ring, value); } if (table->size & 0x01) { @@ -283,14 +283,14 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req, * they are reserved by the hardware. 
*/ for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) { - intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i)); - intel_ring_emit(ringbuf, value); + intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i)); + intel_ring_emit(ring, value); value = filler; } - intel_ring_emit(ringbuf, MI_NOOP); - intel_ring_advance(ringbuf); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return 0; } diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 6dca0e470e61..cb73d16848b0 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -252,11 +252,11 @@ static int intel_overlay_on(struct intel_overlay *overlay) overlay->active = true; - intel_ring_emit(req->ringbuf, MI_OVERLAY_FLIP | MI_OVERLAY_ON); - intel_ring_emit(req->ringbuf, overlay->flip_addr | OFC_UPDATE); - intel_ring_emit(req->ringbuf, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); - intel_ring_emit(req->ringbuf, MI_NOOP); - intel_ring_advance(req->ringbuf); + intel_ring_emit(req->ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON); + intel_ring_emit(req->ring, overlay->flip_addr | OFC_UPDATE); + intel_ring_emit(req->ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + intel_ring_emit(req->ring, MI_NOOP); + intel_ring_advance(req->ring); return intel_overlay_do_wait_request(overlay, req, NULL); } @@ -293,9 +293,9 @@ static int intel_overlay_continue(struct intel_overlay *overlay, return ret; } - intel_ring_emit(req->ringbuf, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); - intel_ring_emit(req->ringbuf, flip_addr); - intel_ring_advance(req->ringbuf); + intel_ring_emit(req->ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); + intel_ring_emit(req->ring, flip_addr); + intel_ring_advance(req->ring); WARN_ON(overlay->last_flip_req); i915_gem_request_assign(&overlay->last_flip_req, req); @@ -360,22 +360,22 @@ static int intel_overlay_off(struct intel_overlay *overlay) } /* wait for overlay to go idle */ - intel_ring_emit(req->ringbuf, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); - intel_ring_emit(req->ringbuf, flip_addr); - intel_ring_emit(req->ringbuf, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + intel_ring_emit(req->ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); + intel_ring_emit(req->ring, flip_addr); + intel_ring_emit(req->ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); /* turn overlay off */ if (IS_I830(dev)) { /* Workaround: Don't disable the overlay fully, since otherwise * it dies on the next OVERLAY_ON cmd. 
*/ - intel_ring_emit(req->ringbuf, MI_NOOP); - intel_ring_emit(req->ringbuf, MI_NOOP); - intel_ring_emit(req->ringbuf, MI_NOOP); + intel_ring_emit(req->ring, MI_NOOP); + intel_ring_emit(req->ring, MI_NOOP); + intel_ring_emit(req->ring, MI_NOOP); } else { - intel_ring_emit(req->ringbuf, MI_OVERLAY_FLIP | MI_OVERLAY_OFF); - intel_ring_emit(req->ringbuf, flip_addr); - intel_ring_emit(req->ringbuf, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + intel_ring_emit(req->ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF); + intel_ring_emit(req->ring, flip_addr); + intel_ring_emit(req->ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); } - intel_ring_advance(req->ringbuf); + intel_ring_advance(req->ring); return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail); } @@ -433,9 +433,9 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) return ret; } - intel_ring_emit(req->ringbuf, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); - intel_ring_emit(req->ringbuf, MI_NOOP); - intel_ring_advance(req->ringbuf); + intel_ring_emit(req->ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + intel_ring_emit(req->ring, MI_NOOP); + intel_ring_advance(req->ring); ret = intel_overlay_do_wait_request(overlay, req, intel_overlay_release_old_vid_tail); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 072fd0fc7748..ae00e79c9c99 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -71,7 +71,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 invalidate_domains, u32 flush_domains) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; u32 cmd; int ret; @@ -98,7 +98,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 invalidate_domains, u32 flush_domains) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; u32 cmd; int ret; @@ -191,7 +191,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, static int intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; int ret; @@ -227,7 +227,7 @@ static int gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 invalidate_domains, u32 flush_domains) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; u32 flags = 0; u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; int ret; @@ -279,7 +279,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, static int gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; int ret; ret = intel_ring_begin(req, 4); @@ -300,7 +300,7 @@ static int gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 invalidate_domains, u32 flush_domains) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; u32 flags = 0; u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; int ret; @@ -363,7 +363,7 @@ static int gen8_emit_pipe_control(struct drm_i915_gem_request *req, u32 flags, u32 scratch_addr) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; int ret; ret = intel_ring_begin(req, 6); @@ -688,7 +688,7 @@ err: static int 
intel_ring_workarounds_emit(struct drm_i915_gem_request *req) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; struct drm_i915_private *dev_priv = req->i915; struct i915_workarounds *w = &dev_priv->workarounds; int ret, i; @@ -1191,7 +1191,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req, unsigned int num_dwords) { #define MBOX_UPDATE_DWORDS 8 - struct intel_ringbuffer *signaller = signaller_req->ringbuf; + struct intel_ringbuffer *signaller = signaller_req->ring; struct drm_i915_private *dev_priv = signaller_req->i915; struct intel_engine_cs *waiter; int i, ret, num_rings; @@ -1229,7 +1229,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req, unsigned int num_dwords) { #define MBOX_UPDATE_DWORDS 6 - struct intel_ringbuffer *signaller = signaller_req->ringbuf; + struct intel_ringbuffer *signaller = signaller_req->ring; struct drm_i915_private *dev_priv = signaller_req->i915; struct intel_engine_cs *waiter; int i, ret, num_rings; @@ -1264,7 +1264,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req, static int gen6_signal(struct drm_i915_gem_request *signaller_req, unsigned int num_dwords) { - struct intel_ringbuffer *signaller = signaller_req->ringbuf; + struct intel_ringbuffer *signaller = signaller_req->ring; struct drm_i915_private *dev_priv = signaller_req->i915; struct intel_engine_cs *useless; int i, ret, num_rings; @@ -1306,7 +1306,7 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req, static int gen6_add_request(struct drm_i915_gem_request *req) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; int ret; if (req->engine->semaphore.signal) @@ -1345,7 +1345,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req, struct intel_engine_cs *signaller, u32 seqno) { - struct intel_ringbuffer *waiter = waiter_req->ringbuf; + struct intel_ringbuffer *waiter = waiter_req->ring; struct drm_i915_private *dev_priv = waiter_req->i915; int ret; @@ -1373,7 +1373,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req, struct intel_engine_cs *signaller, u32 seqno) { - struct intel_ringbuffer *waiter = waiter_req->ringbuf; + struct intel_ringbuffer *waiter = waiter_req->ring; u32 dw1 = MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER; @@ -1421,7 +1421,7 @@ do { \ static int pc_render_add_request(struct drm_i915_gem_request *req) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; u32 addr = req->engine->status_page.gfx_addr + (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); u32 scratch_addr = addr; @@ -1548,7 +1548,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req, u32 invalidate_domains, u32 flush_domains) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; int ret; ret = intel_ring_begin(req, 2); @@ -1564,7 +1564,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req, static int i9xx_add_request(struct drm_i915_gem_request *req) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; int ret; ret = intel_ring_begin(req, 4); @@ -1658,7 +1658,7 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req, u64 offset, u32 length, unsigned dispatch_flags) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; int ret; ret = intel_ring_begin(req, 2); @@ -1685,7 +1685,7 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req, u64 
offset, u32 len, unsigned dispatch_flags) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; u32 cs_offset = req->engine->scratch.gtt_offset; int ret; @@ -1748,7 +1748,7 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req, u64 offset, u32 len, unsigned dispatch_flags) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; int ret; ret = intel_ring_begin(req, 2); @@ -2082,7 +2082,7 @@ int intel_ring_idle(struct intel_engine_cs *ring) int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request) { - request->ringbuf = request->engine->buffer; + request->ring = request->engine->buffer; return 0; } @@ -2135,17 +2135,17 @@ void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf) static int wait_for_space(struct drm_i915_gem_request *req, int bytes) { - struct intel_ringbuffer *ringbuf = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; struct intel_engine_cs *engine = req->engine; struct drm_i915_gem_request *target; unsigned space; int ret; - if (intel_ring_space(ringbuf) >= bytes) + if (intel_ring_space(ring) >= bytes) return 0; /* The whole point of reserving space is to not wait! */ - WARN_ON(ringbuf->reserved_in_use); + WARN_ON(ring->reserved_in_use); list_for_each_entry(target, &engine->request_list, list) { /* @@ -2153,12 +2153,12 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes) * from multiple ringbuffers. Here, we must ignore any that * aren't from the ringbuffer we're considering. */ - if (target->ringbuf != ringbuf) + if (target->ring != ring) continue; /* Would completion of this request free enough space? */ - space = __intel_ring_space(target->postfix, ringbuf->tail, - ringbuf->size); + space = __intel_ring_space(target->postfix, ring->tail, + ring->size); if (space >= bytes) break; } @@ -2170,7 +2170,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes) if (ret) return ret; - ringbuf->space = space; + ring->space = space; return 0; } @@ -2185,16 +2185,16 @@ static void ring_wrap(struct intel_ringbuffer *ringbuf) static int ring_prepare(struct drm_i915_gem_request *req, int bytes) { - struct intel_ringbuffer *ringbuf = req->ringbuf; - int remain_usable = ringbuf->effective_size - ringbuf->tail; - int remain_actual = ringbuf->size - ringbuf->tail; + struct intel_ringbuffer *ring = req->ring; + int remain_usable = ring->effective_size - ring->tail; + int remain_actual = ring->size - ring->tail; int ret, total_bytes, wait_bytes = 0; bool need_wrap = false; - if (ringbuf->reserved_in_use) + if (ring->reserved_in_use) total_bytes = bytes; else - total_bytes = bytes + ringbuf->reserved_size; + total_bytes = bytes + ring->reserved_size; if (unlikely(bytes > remain_usable)) { /* @@ -2210,9 +2210,9 @@ static int ring_prepare(struct drm_i915_gem_request *req, int bytes) * falls off the end. So only need to to wait for the * reserved size after flushing out the remainder. */ - wait_bytes = remain_actual + ringbuf->reserved_size; + wait_bytes = remain_actual + ring->reserved_size; need_wrap = true; - } else if (total_bytes > ringbuf->space) { + } else if (total_bytes > ring->space) { /* No wrapping required, just waiting. 
*/ wait_bytes = total_bytes; } @@ -2224,7 +2224,7 @@ static int ring_prepare(struct drm_i915_gem_request *req, int bytes) return ret; if (need_wrap) - ring_wrap(ringbuf); + ring_wrap(ring); } return 0; @@ -2238,14 +2238,14 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) if (ret) return ret; - req->ringbuf->space -= num_dwords * sizeof(uint32_t); + req->ring->space -= num_dwords * sizeof(uint32_t); return 0; } /* Align the ring tail to a cacheline boundary */ int intel_ring_cacheline_align(struct drm_i915_gem_request *req) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; int num_dwords = (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); int ret; @@ -2320,7 +2320,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring, static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 invalidate, u32 flush) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; uint32_t cmd; int ret; @@ -2366,7 +2366,7 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, u64 offset, u32 len, unsigned dispatch_flags) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; bool ppgtt = USES_PPGTT(req->i915) && !(dispatch_flags & I915_DISPATCH_SECURE); int ret; @@ -2392,7 +2392,7 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, u64 offset, u32 len, unsigned dispatch_flags) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; int ret; ret = intel_ring_begin(req, 2); @@ -2417,7 +2417,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, u64 offset, u32 len, unsigned dispatch_flags) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; int ret; ret = intel_ring_begin(req, 2); @@ -2440,7 +2440,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 invalidate, u32 flush) { - struct intel_ringbuffer *ring = req->ringbuf; + struct intel_ringbuffer *ring = req->ring; uint32_t cmd; int ret; -- 2.7.0.rc3