Both perform the same actions with more or less indirection, so just unify the code. v2: Add back a few intel_engine_cs locals Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx> Reviewed-by: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx> Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-11-git-send-email-chris@xxxxxxxxxxxxxxxxxx --- drivers/gpu/drm/i915/i915_gem_context.c | 47 ++-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 53 ++-- drivers/gpu/drm/i915/i915_gem_gtt.c | 48 ++-- drivers/gpu/drm/i915/intel_display.c | 80 +++--- drivers/gpu/drm/i915/intel_lrc.c | 183 +++++++------- drivers/gpu/drm/i915/intel_lrc.h | 26 -- drivers/gpu/drm/i915/intel_mocs.c | 38 ++- drivers/gpu/drm/i915/intel_overlay.c | 50 ++-- drivers/gpu/drm/i915/intel_ringbuffer.c | 382 +++++++++++++++-------------- drivers/gpu/drm/i915/intel_ringbuffer.h | 25 +- 10 files changed, 450 insertions(+), 482 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index bd13d084e19c..a0e24eb5e167 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -552,6 +552,7 @@ static inline int mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) { struct drm_i915_private *dev_priv = req->i915; + struct intel_ringbuffer *ring = req->ringbuf; struct intel_engine_cs *engine = req->engine; u32 flags = hw_flags | MI_MM_SPACE_GTT; const int num_rings = @@ -589,64 +590,64 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */ if (INTEL_GEN(dev_priv) >= 7) { - intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE); + intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE); if (num_rings) { struct intel_engine_cs *signaller; - intel_ring_emit(engine, + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings)); for_each_engine(signaller, dev_priv) { if (signaller == engine) continue; - intel_ring_emit_reg(engine, + intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base)); - intel_ring_emit(engine, + intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); } } } - intel_ring_emit(engine, MI_NOOP); - intel_ring_emit(engine, MI_SET_CONTEXT); - intel_ring_emit(engine, + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_SET_CONTEXT); + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) | flags); /* * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP * WaMiSetContext_Hang:snb,ivb,vlv */ - intel_ring_emit(engine, MI_NOOP); + intel_ring_emit(ring, MI_NOOP); if (INTEL_GEN(dev_priv) >= 7) { if (num_rings) { struct intel_engine_cs *signaller; i915_reg_t last_reg = {}; /* keep gcc quiet */ - intel_ring_emit(engine, + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings)); for_each_engine(signaller, dev_priv) { if (signaller == engine) continue; last_reg = RING_PSMI_CTL(signaller->mmio_base); - intel_ring_emit_reg(engine, last_reg); - intel_ring_emit(engine, + intel_ring_emit_reg(ring, last_reg); + intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); } /* Insert a delay before the next switch! 
*/ - intel_ring_emit(engine, + intel_ring_emit(ring, MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT); - intel_ring_emit_reg(engine, last_reg); - intel_ring_emit(engine, engine->scratch.gtt_offset); - intel_ring_emit(engine, MI_NOOP); + intel_ring_emit_reg(ring, last_reg); + intel_ring_emit(ring, engine->scratch.gtt_offset); + intel_ring_emit(ring, MI_NOOP); } - intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE); + intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE); } - intel_ring_advance(engine); + intel_ring_advance(ring); return ret; } @@ -654,7 +655,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) static int remap_l3(struct drm_i915_gem_request *req, int slice) { u32 *remap_info = req->i915->l3_parity.remap_info[slice]; - struct intel_engine_cs *engine = req->engine; + struct intel_ringbuffer *ring = req->ringbuf; int i, ret; if (!remap_info) @@ -669,13 +670,13 @@ static int remap_l3(struct drm_i915_gem_request *req, int slice) * here because no other code should access these registers other than * at initialization time. */ - intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4)); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4)); for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) { - intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i)); - intel_ring_emit(engine, remap_info[i]); + intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i)); + intel_ring_emit(ring, remap_info[i]); } - intel_ring_emit(engine, MI_NOOP); - intel_ring_advance(engine); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index aa35867f3032..2f9f0daa1bc2 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1171,14 +1171,12 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params) } static int -i915_reset_gen7_sol_offsets(struct drm_device *dev, - struct drm_i915_gem_request *req) +i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req) { - struct intel_engine_cs *engine = req->engine; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_ringbuffer *ring = req->ringbuf; int ret, i; - if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) { + if (!IS_GEN7(req->i915) || req->engine->id != RCS) { DRM_DEBUG("sol reset is gen7/rcs only\n"); return -EINVAL; } @@ -1188,12 +1186,12 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev, return ret; for (i = 0; i < 4; i++) { - intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit_reg(engine, GEN7_SO_WRITE_OFFSET(i)); - intel_ring_emit(engine, 0); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i)); + intel_ring_emit(ring, 0); } - intel_ring_advance(engine); + intel_ring_advance(ring); return 0; } @@ -1256,9 +1254,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, struct drm_i915_gem_execbuffer2 *args, struct list_head *vmas) { - struct drm_device *dev = params->dev; - struct intel_engine_cs *engine = params->engine; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = params->request->i915; u64 exec_start, exec_len; int instp_mode; u32 instp_mask; @@ -1272,34 +1268,31 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, if (ret) return ret; - WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<engine->id), - "%s didn't clear reload\n", engine->name); - instp_mode = 
args->flags & I915_EXEC_CONSTANTS_MASK; instp_mask = I915_EXEC_CONSTANTS_MASK; switch (instp_mode) { case I915_EXEC_CONSTANTS_REL_GENERAL: case I915_EXEC_CONSTANTS_ABSOLUTE: case I915_EXEC_CONSTANTS_REL_SURFACE: - if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) { + if (instp_mode != 0 && params->engine->id != RCS) { DRM_DEBUG("non-0 rel constants mode on non-RCS\n"); return -EINVAL; } if (instp_mode != dev_priv->relative_constants_mode) { - if (INTEL_INFO(dev)->gen < 4) { + if (INTEL_INFO(dev_priv)->gen < 4) { DRM_DEBUG("no rel constants on pre-gen4\n"); return -EINVAL; } - if (INTEL_INFO(dev)->gen > 5 && + if (INTEL_INFO(dev_priv)->gen > 5 && instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) { DRM_DEBUG("rel surface constants mode invalid on gen5+\n"); return -EINVAL; } /* The HW changed the meaning on this bit on gen6 */ - if (INTEL_INFO(dev)->gen >= 6) + if (INTEL_INFO(dev_priv)->gen >= 6) instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE; } break; @@ -1308,23 +1301,25 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, return -EINVAL; } - if (engine == &dev_priv->engine[RCS] && + if (params->engine->id == RCS && instp_mode != dev_priv->relative_constants_mode) { + struct intel_ringbuffer *ring = params->request->ringbuf; + ret = intel_ring_begin(params->request, 4); if (ret) return ret; - intel_ring_emit(engine, MI_NOOP); - intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit_reg(engine, INSTPM); - intel_ring_emit(engine, instp_mask << 16 | instp_mode); - intel_ring_advance(engine); + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit_reg(ring, INSTPM); + intel_ring_emit(ring, instp_mask << 16 | instp_mode); + intel_ring_advance(ring); dev_priv->relative_constants_mode = instp_mode; } if (args->flags & I915_EXEC_GEN7_SOL_RESET) { - ret = i915_reset_gen7_sol_offsets(dev, params->request); + ret = i915_reset_gen7_sol_offsets(params->request); if (ret) return ret; } @@ -1336,9 +1331,9 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, if (exec_len == 0) exec_len = params->batch_obj->base.size; - ret = engine->dispatch_execbuffer(params->request, - exec_start, exec_len, - params->dispatch_flags); + ret = params->engine->dispatch_execbuffer(params->request, + exec_start, exec_len, + params->dispatch_flags); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 38e7d992a20d..b38a5311f996 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -669,6 +669,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req, unsigned entry, dma_addr_t addr) { + struct intel_ringbuffer *ring = req->ringbuf; struct intel_engine_cs *engine = req->engine; int ret; @@ -678,13 +679,13 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req, if (ret) return ret; - intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit_reg(engine, GEN8_RING_PDP_UDW(engine, entry)); - intel_ring_emit(engine, upper_32_bits(addr)); - intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit_reg(engine, GEN8_RING_PDP_LDW(engine, entry)); - intel_ring_emit(engine, lower_32_bits(addr)); - intel_ring_advance(engine); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, entry)); + intel_ring_emit(ring, upper_32_bits(addr)); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, entry)); + 
intel_ring_emit(ring, lower_32_bits(addr)); + intel_ring_advance(ring); return 0; } @@ -1660,6 +1661,7 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt) static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_request *req) { + struct intel_ringbuffer *ring = req->ringbuf; struct intel_engine_cs *engine = req->engine; int ret; @@ -1672,13 +1674,13 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, if (ret) return ret; - intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2)); - intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine)); - intel_ring_emit(engine, PP_DIR_DCLV_2G); - intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine)); - intel_ring_emit(engine, get_pd_offset(ppgtt)); - intel_ring_emit(engine, MI_NOOP); - intel_ring_advance(engine); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2)); + intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine)); + intel_ring_emit(ring, PP_DIR_DCLV_2G); + intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine)); + intel_ring_emit(ring, get_pd_offset(ppgtt)); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return 0; } @@ -1686,6 +1688,7 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_request *req) { + struct intel_ringbuffer *ring = req->ringbuf; struct intel_engine_cs *engine = req->engine; int ret; @@ -1698,17 +1701,18 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, if (ret) return ret; - intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2)); - intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine)); - intel_ring_emit(engine, PP_DIR_DCLV_2G); - intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine)); - intel_ring_emit(engine, get_pd_offset(ppgtt)); - intel_ring_emit(engine, MI_NOOP); - intel_ring_advance(engine); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2)); + intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine)); + intel_ring_emit(ring, PP_DIR_DCLV_2G); + intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine)); + intel_ring_emit(ring, get_pd_offset(ppgtt)); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); /* XXX: RCS is the only one to auto invalidate the TLBs? 
*/ if (engine->id != RCS) { - ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); + ret = engine->flush(req, + I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); if (ret) return ret; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 78beb7e9d384..d18ed32e6a31 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -11123,7 +11123,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev, struct drm_i915_gem_request *req, uint32_t flags) { - struct intel_engine_cs *engine = req->engine; + struct intel_ringbuffer *ring = req->ringbuf; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); u32 flip_mask; int ret; @@ -11139,13 +11139,13 @@ static int intel_gen2_queue_flip(struct drm_device *dev, flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; else flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; - intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask); - intel_ring_emit(engine, MI_NOOP); - intel_ring_emit(engine, MI_DISPLAY_FLIP | + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); - intel_ring_emit(engine, fb->pitches[0]); - intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); - intel_ring_emit(engine, 0); /* aux display base address, unused */ + intel_ring_emit(ring, fb->pitches[0]); + intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset); + intel_ring_emit(ring, 0); /* aux display base address, unused */ return 0; } @@ -11157,7 +11157,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev, struct drm_i915_gem_request *req, uint32_t flags) { - struct intel_engine_cs *engine = req->engine; + struct intel_ringbuffer *ring = req->ringbuf; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); u32 flip_mask; int ret; @@ -11170,13 +11170,13 @@ static int intel_gen3_queue_flip(struct drm_device *dev, flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; else flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; - intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask); - intel_ring_emit(engine, MI_NOOP); - intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); - intel_ring_emit(engine, fb->pitches[0]); - intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); - intel_ring_emit(engine, MI_NOOP); + intel_ring_emit(ring, fb->pitches[0]); + intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset); + intel_ring_emit(ring, MI_NOOP); return 0; } @@ -11188,7 +11188,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev, struct drm_i915_gem_request *req, uint32_t flags) { - struct intel_engine_cs *engine = req->engine; + struct intel_ringbuffer *ring = req->ringbuf; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t pf, pipesrc; @@ -11202,10 +11202,10 @@ static int intel_gen4_queue_flip(struct drm_device *dev, * Display Registers (which do not change across a page-flip) * so we need only reprogram the base address. 
*/ - intel_ring_emit(engine, MI_DISPLAY_FLIP | + intel_ring_emit(ring, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); - intel_ring_emit(engine, fb->pitches[0]); - intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset | + intel_ring_emit(ring, fb->pitches[0]); + intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset | obj->tiling_mode); /* XXX Enabling the panel-fitter across page-flip is so far @@ -11214,7 +11214,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev, */ pf = 0; pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; - intel_ring_emit(engine, pf | pipesrc); + intel_ring_emit(ring, pf | pipesrc); return 0; } @@ -11226,7 +11226,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, struct drm_i915_gem_request *req, uint32_t flags) { - struct intel_engine_cs *engine = req->engine; + struct intel_ringbuffer *ring = req->ringbuf; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t pf, pipesrc; @@ -11236,10 +11236,10 @@ static int intel_gen6_queue_flip(struct drm_device *dev, if (ret) return ret; - intel_ring_emit(engine, MI_DISPLAY_FLIP | + intel_ring_emit(ring, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); - intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode); - intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); + intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode); + intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset); /* Contrary to the suggestions in the documentation, * "Enable Panel Fitter" does not seem to be required when page @@ -11249,7 +11249,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, */ pf = 0; pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; - intel_ring_emit(engine, pf | pipesrc); + intel_ring_emit(ring, pf | pipesrc); return 0; } @@ -11261,7 +11261,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, struct drm_i915_gem_request *req, uint32_t flags) { - struct intel_engine_cs *engine = req->engine; + struct intel_ringbuffer *ring = req->ringbuf; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t plane_bit = 0; int len, ret; @@ -11282,7 +11282,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, } len = 4; - if (engine->id == RCS) { + if (req->engine->id == RCS) { len += 6; /* * On Gen 8, SRM is now taking an extra dword to accommodate @@ -11320,30 +11320,30 @@ static int intel_gen7_queue_flip(struct drm_device *dev, * for the RCS also doesn't appear to drop events. Setting the DERRMR * to zero does lead to lockups within MI_DISPLAY_FLIP. 
*/ - if (engine->id == RCS) { - intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit_reg(engine, DERRMR); - intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE | + if (req->engine->id == RCS) { + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit_reg(ring, DERRMR); + intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE | DERRMR_PIPEB_PRI_FLIP_DONE | DERRMR_PIPEC_PRI_FLIP_DONE)); if (IS_GEN8(dev)) - intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 | + intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT); else - intel_ring_emit(engine, MI_STORE_REGISTER_MEM | + intel_ring_emit(ring, MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT); - intel_ring_emit_reg(engine, DERRMR); - intel_ring_emit(engine, engine->scratch.gtt_offset + 256); + intel_ring_emit_reg(ring, DERRMR); + intel_ring_emit(ring, req->engine->scratch.gtt_offset + 256); if (IS_GEN8(dev)) { - intel_ring_emit(engine, 0); - intel_ring_emit(engine, MI_NOOP); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, MI_NOOP); } } - intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit); - intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode)); - intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); - intel_ring_emit(engine, (MI_NOOP)); + intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); + intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); + intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset); + intel_ring_emit(ring, (MI_NOOP)); return 0; } diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index dd3f490556dc..d851b4eef139 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -773,7 +773,7 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) struct intel_ringbuffer *ringbuf = request->ringbuf; struct intel_engine_cs *engine = request->engine; - intel_logical_ring_advance(ringbuf); + intel_ring_advance(ringbuf); request->tail = ringbuf->tail; /* @@ -782,9 +782,9 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) * * Caller must reserve WA_TAIL_DWORDS for us! */ - intel_logical_ring_emit(ringbuf, MI_NOOP); - intel_logical_ring_emit(ringbuf, MI_NOOP); - intel_logical_ring_advance(ringbuf); + intel_ring_emit(ringbuf, MI_NOOP); + intel_ring_emit(ringbuf, MI_NOOP); + intel_ring_advance(ringbuf); /* We keep the previous context alive until we retire the following * request. 
This ensures that any the context object is still pinned @@ -868,11 +868,11 @@ int intel_execlists_submission(struct i915_execbuffer_params *params, if (ret) return ret; - intel_logical_ring_emit(ringbuf, MI_NOOP); - intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1)); - intel_logical_ring_emit_reg(ringbuf, INSTPM); - intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode); - intel_logical_ring_advance(ringbuf); + intel_ring_emit(ringbuf, MI_NOOP); + intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit_reg(ringbuf, INSTPM); + intel_ring_emit(ringbuf, instp_mask << 16 | instp_mode); + intel_ring_advance(ringbuf); dev_priv->relative_constants_mode = instp_mode; } @@ -1045,14 +1045,14 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) if (ret) return ret; - intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count)); + intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count)); for (i = 0; i < w->count; i++) { - intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr); - intel_logical_ring_emit(ringbuf, w->reg[i].value); + intel_ring_emit_reg(ringbuf, w->reg[i].addr); + intel_ring_emit(ringbuf, w->reg[i].value); } - intel_logical_ring_emit(ringbuf, MI_NOOP); + intel_ring_emit(ringbuf, MI_NOOP); - intel_logical_ring_advance(ringbuf); + intel_ring_advance(ringbuf); engine->gpu_caches_dirty = true; ret = logical_ring_flush_all_caches(req); @@ -1553,8 +1553,8 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine) static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) { struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt; + struct intel_ringbuffer *ring = req->ringbuf; struct intel_engine_cs *engine = req->engine; - struct intel_ringbuffer *ringbuf = req->ringbuf; const int num_lri_cmds = GEN8_LEGACY_PDPES * 2; int i, ret; @@ -1562,20 +1562,18 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) if (ret) return ret; - intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds)); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds)); for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) { const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); - intel_logical_ring_emit_reg(ringbuf, - GEN8_RING_PDP_UDW(engine, i)); - intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr)); - intel_logical_ring_emit_reg(ringbuf, - GEN8_RING_PDP_LDW(engine, i)); - intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr)); + intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, i)); + intel_ring_emit(ring, upper_32_bits(pd_daddr)); + intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, i)); + intel_ring_emit(ring, lower_32_bits(pd_daddr)); } - intel_logical_ring_emit(ringbuf, MI_NOOP); - intel_logical_ring_advance(ringbuf); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return 0; } @@ -1583,7 +1581,7 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) static int gen8_emit_bb_start(struct drm_i915_gem_request *req, u64 offset, unsigned dispatch_flags) { - struct intel_ringbuffer *ringbuf = req->ringbuf; + struct intel_ringbuffer *ring = req->ringbuf; bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE); int ret; @@ -1610,14 +1608,14 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req, return ret; /* FIXME(BDW): Address space and security selectors. */ - intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | - (ppgtt<<8) | - (dispatch_flags & I915_DISPATCH_RS ? 
- MI_BATCH_RESOURCE_STREAMER : 0)); - intel_logical_ring_emit(ringbuf, lower_32_bits(offset)); - intel_logical_ring_emit(ringbuf, upper_32_bits(offset)); - intel_logical_ring_emit(ringbuf, MI_NOOP); - intel_logical_ring_advance(ringbuf); + intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | + (ppgtt<<8) | + (dispatch_flags & I915_DISPATCH_RS ? + MI_BATCH_RESOURCE_STREAMER : 0)); + intel_ring_emit(ring, lower_32_bits(offset)); + intel_ring_emit(ring, upper_32_bits(offset)); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return 0; } @@ -1640,9 +1638,8 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 invalidate_domains, u32 unused) { - struct intel_ringbuffer *ringbuf = request->ringbuf; - struct intel_engine_cs *engine = ringbuf->engine; - struct drm_i915_private *dev_priv = request->i915; + struct intel_ringbuffer *ring = request->ringbuf; + struct intel_engine_cs *engine = ring->engine; uint32_t cmd; int ret; @@ -1661,17 +1658,17 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, if (invalidate_domains & I915_GEM_GPU_DOMAINS) { cmd |= MI_INVALIDATE_TLB; - if (engine == &dev_priv->engine[VCS]) + if (engine->id == VCS) cmd |= MI_INVALIDATE_BSD; } - intel_logical_ring_emit(ringbuf, cmd); - intel_logical_ring_emit(ringbuf, - I915_GEM_HWS_SCRATCH_ADDR | - MI_FLUSH_DW_USE_GTT); - intel_logical_ring_emit(ringbuf, 0); /* upper addr */ - intel_logical_ring_emit(ringbuf, 0); /* value */ - intel_logical_ring_advance(ringbuf); + intel_ring_emit(ring, cmd); + intel_ring_emit(ring, + I915_GEM_HWS_SCRATCH_ADDR | + MI_FLUSH_DW_USE_GTT); + intel_ring_emit(ring, 0); /* upper addr */ + intel_ring_emit(ring, 0); /* value */ + intel_ring_advance(ring); return 0; } @@ -1680,8 +1677,8 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, u32 invalidate_domains, u32 flush_domains) { - struct intel_ringbuffer *ringbuf = request->ringbuf; - struct intel_engine_cs *engine = ringbuf->engine; + struct intel_ringbuffer *ring = request->ringbuf; + struct intel_engine_cs *engine = request->engine; u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; bool vf_flush_wa = false, dc_flush_wa = false; u32 flags = 0; @@ -1732,40 +1729,40 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, return ret; if (vf_flush_wa) { - intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, 0); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); } if (dc_flush_wa) { - intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); - intel_logical_ring_emit(ringbuf, PIPE_CONTROL_DC_FLUSH_ENABLE); - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, 0); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(ring, PIPE_CONTROL_DC_FLUSH_ENABLE); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); } - intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); - intel_logical_ring_emit(ringbuf, flags); - intel_logical_ring_emit(ringbuf, scratch_addr); - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, 0); - 
intel_logical_ring_emit(ringbuf, 0); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(ring, flags); + intel_ring_emit(ring, scratch_addr); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); if (dc_flush_wa) { - intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); - intel_logical_ring_emit(ringbuf, PIPE_CONTROL_CS_STALL); - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, 0); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(ring, PIPE_CONTROL_CS_STALL); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); } - intel_logical_ring_advance(ringbuf); + intel_ring_advance(ring); return 0; } @@ -1794,7 +1791,7 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine) static int gen8_emit_request(struct drm_i915_gem_request *request) { - struct intel_ringbuffer *ringbuf = request->ringbuf; + struct intel_ringbuffer *ring = request->ringbuf; int ret; ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS); @@ -1804,21 +1801,20 @@ static int gen8_emit_request(struct drm_i915_gem_request *request) /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */ BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5)); - intel_logical_ring_emit(ringbuf, - (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW); - intel_logical_ring_emit(ringbuf, - intel_hws_seqno_address(request->engine) | - MI_FLUSH_DW_USE_GTT); - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, request->fence.seqno); - intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); - intel_logical_ring_emit(ringbuf, MI_NOOP); + intel_ring_emit(ring, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW); + intel_ring_emit(ring, + intel_hws_seqno_address(request->engine) | + MI_FLUSH_DW_USE_GTT); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, request->fence.seqno); + intel_ring_emit(ring, MI_USER_INTERRUPT); + intel_ring_emit(ring, MI_NOOP); return intel_logical_ring_advance_and_submit(request); } static int gen8_emit_request_render(struct drm_i915_gem_request *request) { - struct intel_ringbuffer *ringbuf = request->ringbuf; + struct intel_ringbuffer *ring = request->ringbuf; int ret; ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS); @@ -1832,19 +1828,18 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request) * need a prior CS_STALL, which is emitted by the flush * following the batch. */ - intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); - intel_logical_ring_emit(ringbuf, - (PIPE_CONTROL_GLOBAL_GTT_IVB | - PIPE_CONTROL_CS_STALL | - PIPE_CONTROL_QW_WRITE)); - intel_logical_ring_emit(ringbuf, - intel_hws_seqno_address(request->engine)); - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(ring, + (PIPE_CONTROL_GLOBAL_GTT_IVB | + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_QW_WRITE)); + intel_ring_emit(ring, intel_hws_seqno_address(request->engine)); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, i915_gem_request_get_seqno(request)); /* We're thrashing one dword of HWS. 
*/ - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); - intel_logical_ring_emit(ringbuf, MI_NOOP); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, MI_USER_INTERRUPT); + intel_ring_emit(ring, MI_NOOP); return intel_logical_ring_advance_and_submit(request); } diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 38287302c5ba..d26fb44549e5 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -73,32 +73,6 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine); int intel_engines_init(struct drm_device *dev); int logical_ring_flush_all_caches(struct drm_i915_gem_request *req); -/** - * intel_logical_ring_advance() - advance the ringbuffer tail - * @ringbuf: Ringbuffer to advance. - * - * The tail is only updated in our logical ringbuffer struct. - */ -static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf) -{ - __intel_ringbuffer_advance(ringbuf); -} - -/** - * intel_logical_ring_emit() - write a DWORD to the ringbuffer. - * @ringbuf: Ringbuffer to write to. - * @data: DWORD to write. - */ -static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf, - u32 data) -{ - __intel_ringbuffer_emit(ringbuf, data); -} -static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf, - i915_reg_t reg) -{ - intel_logical_ring_emit(ringbuf, i915_mmio_reg_offset(reg)); -} /* Logical Ring Contexts */ diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c index bd46968d8a07..3059c52030b4 100644 --- a/drivers/gpu/drm/i915/intel_mocs.c +++ b/drivers/gpu/drm/i915/intel_mocs.c @@ -288,14 +288,11 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req, if (ret) return ret; - intel_logical_ring_emit(ringbuf, - MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES)); + intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES)); for (index = 0; index < table->size; index++) { - intel_logical_ring_emit_reg(ringbuf, - mocs_register(engine, index)); - intel_logical_ring_emit(ringbuf, - table->table[index].control_value); + intel_ring_emit_reg(ringbuf, mocs_register(engine, index)); + intel_ring_emit(ringbuf, table->table[index].control_value); } /* @@ -307,14 +304,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req, * that value to all the used entries. 
*/ for (; index < GEN9_NUM_MOCS_ENTRIES; index++) { - intel_logical_ring_emit_reg(ringbuf, - mocs_register(engine, index)); - intel_logical_ring_emit(ringbuf, - table->table[0].control_value); + intel_ring_emit_reg(ringbuf, mocs_register(engine, index)); + intel_ring_emit(ringbuf, table->table[0].control_value); } - intel_logical_ring_emit(ringbuf, MI_NOOP); - intel_logical_ring_advance(ringbuf); + intel_ring_emit(ringbuf, MI_NOOP); + intel_ring_advance(ringbuf); return 0; } @@ -352,19 +347,18 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req, if (ret) return ret; - intel_logical_ring_emit(ringbuf, + intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2)); for (i = 0; i < table->size/2; i++) { - intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i)); - intel_logical_ring_emit(ringbuf, - l3cc_combine(table, 2*i, 2*i+1)); + intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i)); + intel_ring_emit(ringbuf, l3cc_combine(table, 2*i, 2*i+1)); } if (table->size & 0x01) { /* Odd table size - 1 left over */ - intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i)); - intel_logical_ring_emit(ringbuf, l3cc_combine(table, 2*i, 0)); + intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i)); + intel_ring_emit(ringbuf, l3cc_combine(table, 2*i, 0)); i++; } @@ -374,12 +368,12 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req, * they are reserved by the hardware. */ for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) { - intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i)); - intel_logical_ring_emit(ringbuf, l3cc_combine(table, 0, 0)); + intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i)); + intel_ring_emit(ringbuf, l3cc_combine(table, 0, 0)); } - intel_logical_ring_emit(ringbuf, MI_NOOP); - intel_logical_ring_advance(ringbuf); + intel_ring_emit(ringbuf, MI_NOOP); + intel_ring_advance(ringbuf); return 0; } diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index c10ce368787e..ec63b64fb202 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -235,6 +235,7 @@ static int intel_overlay_on(struct intel_overlay *overlay) struct drm_i915_private *dev_priv = overlay->i915; struct intel_engine_cs *engine = &dev_priv->engine[RCS]; struct drm_i915_gem_request *req; + struct intel_ringbuffer *ring; int ret; WARN_ON(overlay->active); @@ -252,11 +253,12 @@ static int intel_overlay_on(struct intel_overlay *overlay) overlay->active = true; - intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_ON); - intel_ring_emit(engine, overlay->flip_addr | OFC_UPDATE); - intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); - intel_ring_emit(engine, MI_NOOP); - intel_ring_advance(engine); + ring = req->ringbuf; + intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON); + intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE); + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return intel_overlay_do_wait_request(overlay, req, NULL); } @@ -268,6 +270,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay, struct drm_i915_private *dev_priv = overlay->i915; struct intel_engine_cs *engine = &dev_priv->engine[RCS]; struct drm_i915_gem_request *req; + struct intel_ringbuffer *ring; u32 flip_addr = overlay->flip_addr; u32 tmp; int ret; @@ -292,9 +295,10 @@ static int intel_overlay_continue(struct intel_overlay *overlay, return ret; } - intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); - 
intel_ring_emit(engine, flip_addr); - intel_ring_advance(engine); + ring = req->ringbuf; + intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); + intel_ring_emit(ring, flip_addr); + intel_ring_advance(ring); WARN_ON(overlay->last_flip_req); i915_gem_request_assign(&overlay->last_flip_req, req); @@ -336,6 +340,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) struct drm_i915_private *dev_priv = overlay->i915; struct intel_engine_cs *engine = &dev_priv->engine[RCS]; struct drm_i915_gem_request *req; + struct intel_ringbuffer *ring; u32 flip_addr = overlay->flip_addr; int ret; @@ -357,24 +362,25 @@ static int intel_overlay_off(struct intel_overlay *overlay) return ret; } + ring = req->ringbuf; /* wait for overlay to go idle */ - intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); - intel_ring_emit(engine, flip_addr); - intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); + intel_ring_emit(ring, flip_addr); + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); /* turn overlay off */ if (IS_I830(dev_priv)) { /* Workaround: Don't disable the overlay fully, since otherwise * it dies on the next OVERLAY_ON cmd. */ - intel_ring_emit(engine, MI_NOOP); - intel_ring_emit(engine, MI_NOOP); - intel_ring_emit(engine, MI_NOOP); + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_NOOP); } else { - intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_OFF); - intel_ring_emit(engine, flip_addr); - intel_ring_emit(engine, + intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF); + intel_ring_emit(ring, flip_addr); + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); } - intel_ring_advance(engine); + intel_ring_advance(ring); return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail); } @@ -420,6 +426,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) { /* synchronous slowpath */ struct drm_i915_gem_request *req; + struct intel_ringbuffer *ring; req = i915_gem_request_alloc(engine, NULL); if (IS_ERR(req)) @@ -431,10 +438,11 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) return ret; } - intel_ring_emit(engine, + ring = req->ringbuf; + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); - intel_ring_emit(engine, MI_NOOP); - intel_ring_advance(engine); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); ret = intel_overlay_do_wait_request(overlay, req, intel_overlay_release_old_vid_tail); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 15acaf617303..12670068f081 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -58,7 +58,7 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf) ringbuf->tail, ringbuf->size); } -static void __intel_ring_advance(struct intel_engine_cs *engine) +static void __intel_engine_submit(struct intel_engine_cs *engine) { struct intel_ringbuffer *ringbuf = engine->buffer; ringbuf->tail &= ringbuf->size - 1; @@ -70,7 +70,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 invalidate_domains, u32 flush_domains) { - struct intel_engine_cs *engine = req->engine; + struct intel_ringbuffer *ring = req->ringbuf; u32 cmd; int ret; @@ -85,9 +85,9 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req, if (ret) return 
ret; - intel_ring_emit(engine, cmd); - intel_ring_emit(engine, MI_NOOP); - intel_ring_advance(engine); + intel_ring_emit(ring, cmd); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return 0; } @@ -97,7 +97,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 invalidate_domains, u32 flush_domains) { - struct intel_engine_cs *engine = req->engine; + struct intel_ringbuffer *ring = req->ringbuf; u32 cmd; int ret; @@ -129,23 +129,20 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, * are flushed at any MI_FLUSH. */ - cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; - if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) - cmd &= ~MI_NO_WRITE_FLUSH; - if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) + cmd = MI_FLUSH; + if (invalidate_domains) { cmd |= MI_EXE_FLUSH; - - if (invalidate_domains & I915_GEM_DOMAIN_COMMAND && - (IS_G4X(req->i915) || IS_GEN5(req->i915))) - cmd |= MI_INVALIDATE_ISP; + if (IS_G4X(req->i915) || IS_GEN5(req->i915)) + cmd |= MI_INVALIDATE_ISP; + } ret = intel_ring_begin(req, 2); if (ret) return ret; - intel_ring_emit(engine, cmd); - intel_ring_emit(engine, MI_NOOP); - intel_ring_advance(engine); + intel_ring_emit(ring, cmd); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return 0; } @@ -190,34 +187,35 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, static int intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req) { - struct intel_engine_cs *engine = req->engine; - u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; + struct intel_ringbuffer *ring = req->ringbuf; + u32 scratch_addr = + req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; int ret; ret = intel_ring_begin(req, 6); if (ret) return ret; - intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5)); - intel_ring_emit(engine, PIPE_CONTROL_CS_STALL | + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); + intel_ring_emit(ring, PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD); - intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ - intel_ring_emit(engine, 0); /* low dword */ - intel_ring_emit(engine, 0); /* high dword */ - intel_ring_emit(engine, MI_NOOP); - intel_ring_advance(engine); + intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, 0); /* low dword */ + intel_ring_emit(ring, 0); /* high dword */ + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); ret = intel_ring_begin(req, 6); if (ret) return ret; - intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5)); - intel_ring_emit(engine, PIPE_CONTROL_QW_WRITE); - intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ - intel_ring_emit(engine, 0); - intel_ring_emit(engine, 0); - intel_ring_emit(engine, MI_NOOP); - intel_ring_advance(engine); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); + intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE); + intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return 0; } @@ -226,9 +224,10 @@ static int gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 invalidate_domains, u32 flush_domains) { - struct intel_engine_cs *engine = req->engine; + struct intel_ringbuffer *ring = req->ringbuf; + u32 scratch_addr = + req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; u32 flags = 0; - u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; int ret; /* Force SNB workarounds for PIPE_CONTROL flushes */ @@ -266,11 
+265,11 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, if (ret) return ret; - intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4)); - intel_ring_emit(engine, flags); - intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(engine, 0); - intel_ring_advance(engine); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); + intel_ring_emit(ring, flags); + intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); return 0; } @@ -278,19 +277,20 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, static int gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req) { - struct intel_engine_cs *engine = req->engine; + struct intel_ringbuffer *ring = req->ringbuf; int ret; ret = intel_ring_begin(req, 4); if (ret) return ret; - intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4)); - intel_ring_emit(engine, PIPE_CONTROL_CS_STALL | - PIPE_CONTROL_STALL_AT_SCOREBOARD); - intel_ring_emit(engine, 0); - intel_ring_emit(engine, 0); - intel_ring_advance(engine); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); + intel_ring_emit(ring, + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_STALL_AT_SCOREBOARD); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); return 0; } @@ -299,9 +299,10 @@ static int gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 invalidate_domains, u32 flush_domains) { - struct intel_engine_cs *engine = req->engine; + struct intel_ringbuffer *ring = req->ringbuf; + u32 scratch_addr = + req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; u32 flags = 0; - u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; int ret; /* @@ -350,11 +351,11 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, if (ret) return ret; - intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4)); - intel_ring_emit(engine, flags); - intel_ring_emit(engine, scratch_addr); - intel_ring_emit(engine, 0); - intel_ring_advance(engine); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); + intel_ring_emit(ring, flags); + intel_ring_emit(ring, scratch_addr); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); return 0; } @@ -363,20 +364,20 @@ static int gen8_emit_pipe_control(struct drm_i915_gem_request *req, u32 flags, u32 scratch_addr) { - struct intel_engine_cs *engine = req->engine; + struct intel_ringbuffer *ring = req->ringbuf; int ret; ret = intel_ring_begin(req, 6); if (ret) return ret; - intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6)); - intel_ring_emit(engine, flags); - intel_ring_emit(engine, scratch_addr); - intel_ring_emit(engine, 0); - intel_ring_emit(engine, 0); - intel_ring_emit(engine, 0); - intel_ring_advance(engine); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(ring, flags); + intel_ring_emit(ring, scratch_addr); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); return 0; } @@ -385,8 +386,8 @@ static int gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 invalidate_domains, u32 flush_domains) { - u32 flags = 0; u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; + u32 flags = 0; int ret; flags |= PIPE_CONTROL_CS_STALL; @@ -679,14 +680,14 @@ err: static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) { - struct intel_engine_cs *engine = req->engine; + struct intel_ringbuffer *ring = req->ringbuf; struct i915_workarounds *w = &req->i915->workarounds; int ret, i; if (w->count == 0) return 0; - engine->gpu_caches_dirty = true; + 
req->engine->gpu_caches_dirty = true; ret = intel_ring_flush_all_caches(req); if (ret) return ret; @@ -695,16 +696,16 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) if (ret) return ret; - intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(w->count)); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count)); for (i = 0; i < w->count; i++) { - intel_ring_emit_reg(engine, w->reg[i].addr); - intel_ring_emit(engine, w->reg[i].value); + intel_ring_emit_reg(ring, w->reg[i].addr); + intel_ring_emit(ring, w->reg[i].value); } - intel_ring_emit(engine, MI_NOOP); + intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(engine); + intel_ring_advance(ring); - engine->gpu_caches_dirty = true; + req->engine->gpu_caches_dirty = true; ret = intel_ring_flush_all_caches(req); if (ret) return ret; @@ -1337,7 +1338,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req, unsigned int num_dwords) { #define MBOX_UPDATE_DWORDS 8 - struct intel_engine_cs *signaller = signaller_req->engine; + struct intel_ringbuffer *signaller = signaller_req->ringbuf; struct drm_i915_private *dev_priv = signaller_req->i915; struct intel_engine_cs *waiter; enum intel_engine_id id; @@ -1352,20 +1353,23 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req, return ret; for_each_engine_id(waiter, dev_priv, id) { - u64 gtt_offset = signaller->semaphore.signal_ggtt[id]; + u64 gtt_offset = + signaller_req->engine->semaphore.signal_ggtt[id]; if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) continue; intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6)); - intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB | - PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_CS_STALL); + intel_ring_emit(signaller, + PIPE_CONTROL_GLOBAL_GTT_IVB | + PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_CS_STALL); intel_ring_emit(signaller, lower_32_bits(gtt_offset)); intel_ring_emit(signaller, upper_32_bits(gtt_offset)); intel_ring_emit(signaller, signaller_req->fence.seqno); intel_ring_emit(signaller, 0); - intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | - MI_SEMAPHORE_TARGET(waiter->hw_id)); + intel_ring_emit(signaller, + MI_SEMAPHORE_SIGNAL | + MI_SEMAPHORE_TARGET(waiter->hw_id)); intel_ring_emit(signaller, 0); } @@ -1376,7 +1380,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req, unsigned int num_dwords) { #define MBOX_UPDATE_DWORDS 6 - struct intel_engine_cs *signaller = signaller_req->engine; + struct intel_ringbuffer *signaller = signaller_req->ringbuf; struct drm_i915_private *dev_priv = signaller_req->i915; struct intel_engine_cs *waiter; enum intel_engine_id id; @@ -1391,18 +1395,21 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req, return ret; for_each_engine_id(waiter, dev_priv, id) { - u64 gtt_offset = signaller->semaphore.signal_ggtt[id]; + u64 gtt_offset = + signaller_req->engine->semaphore.signal_ggtt[id]; if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) continue; - intel_ring_emit(signaller, (MI_FLUSH_DW + 1) | - MI_FLUSH_DW_OP_STOREDW); - intel_ring_emit(signaller, lower_32_bits(gtt_offset) | - MI_FLUSH_DW_USE_GTT); + intel_ring_emit(signaller, + (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW); + intel_ring_emit(signaller, + lower_32_bits(gtt_offset) | + MI_FLUSH_DW_USE_GTT); intel_ring_emit(signaller, upper_32_bits(gtt_offset)); intel_ring_emit(signaller, signaller_req->fence.seqno); - intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | - MI_SEMAPHORE_TARGET(waiter->hw_id)); + intel_ring_emit(signaller, + MI_SEMAPHORE_SIGNAL | + MI_SEMAPHORE_TARGET(waiter->hw_id)); 
intel_ring_emit(signaller, 0); } @@ -1412,7 +1419,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req, static int gen6_signal(struct drm_i915_gem_request *signaller_req, unsigned int num_dwords) { - struct intel_engine_cs *signaller = signaller_req->engine; + struct intel_ringbuffer *signaller = signaller_req->ringbuf; struct drm_i915_private *dev_priv = signaller_req->i915; struct intel_engine_cs *useless; enum intel_engine_id id; @@ -1428,7 +1435,8 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req, return ret; for_each_engine_id(useless, dev_priv, id) { - i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id]; + i915_reg_t mbox_reg = + signaller_req->engine->semaphore.mbox.signal[id]; if (i915_mmio_reg_valid(mbox_reg)) { intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1)); @@ -1456,6 +1464,7 @@ static int gen6_add_request(struct drm_i915_gem_request *req) { struct intel_engine_cs *engine = req->engine; + struct intel_ringbuffer *ring = req->ringbuf; int ret; if (engine->semaphore.signal) @@ -1466,12 +1475,11 @@ gen6_add_request(struct drm_i915_gem_request *req) if (ret) return ret; - intel_ring_emit(engine, MI_STORE_DWORD_INDEX); - intel_ring_emit(engine, - I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - intel_ring_emit(engine, req->fence.seqno); - intel_ring_emit(engine, MI_USER_INTERRUPT); - __intel_ring_advance(engine); + intel_ring_emit(ring, MI_STORE_DWORD_INDEX); + intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + intel_ring_emit(ring, req->fence.seqno); + intel_ring_emit(ring, MI_USER_INTERRUPT); + __intel_engine_submit(engine); return 0; } @@ -1480,6 +1488,7 @@ static int gen8_render_add_request(struct drm_i915_gem_request *req) { struct intel_engine_cs *engine = req->engine; + struct intel_ringbuffer *ring = req->ringbuf; int ret; if (engine->semaphore.signal) @@ -1489,18 +1498,18 @@ gen8_render_add_request(struct drm_i915_gem_request *req) if (ret) return ret; - intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6)); - intel_ring_emit(engine, (PIPE_CONTROL_GLOBAL_GTT_IVB | - PIPE_CONTROL_CS_STALL | - PIPE_CONTROL_QW_WRITE)); - intel_ring_emit(engine, intel_hws_seqno_address(req->engine)); - intel_ring_emit(engine, 0); - intel_ring_emit(engine, i915_gem_request_get_seqno(req)); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(ring, (PIPE_CONTROL_GLOBAL_GTT_IVB | + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_QW_WRITE)); + intel_ring_emit(ring, intel_hws_seqno_address(engine)); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, i915_gem_request_get_seqno(req)); /* We're thrashing one dword of HWS. 
 	 */
-	intel_ring_emit(engine, 0);
-	intel_ring_emit(engine, MI_USER_INTERRUPT);
-	intel_ring_emit(engine, MI_NOOP);
-	__intel_ring_advance(engine);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_emit(ring, MI_NOOP);
+	__intel_engine_submit(engine);
 
 	return 0;
 }
 
@@ -1524,9 +1533,9 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
-	struct intel_engine_cs *waiter = waiter_req->engine;
+	struct intel_ringbuffer *waiter = waiter_req->ringbuf;
 	struct drm_i915_private *dev_priv = waiter_req->i915;
-	u64 offset = GEN8_WAIT_OFFSET(waiter, signaller->id);
+	u64 offset = GEN8_WAIT_OFFSET(waiter_req->engine, signaller->id);
 	struct i915_hw_ppgtt *ppgtt;
 	int ret;
 
@@ -1558,11 +1567,11 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
-	struct intel_engine_cs *waiter = waiter_req->engine;
+	struct intel_ringbuffer *waiter = waiter_req->ringbuf;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
-	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
+	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter_req->engine->id];
 	int ret;
 
 	/* Throughout all of the GEM code, seqno passed implies our current
@@ -1692,35 +1701,34 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
 	       u32 invalidate_domains,
 	       u32 flush_domains)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_FLUSH);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_FLUSH);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 	return 0;
 }
 
 static int
 i9xx_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(engine,
-			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(engine, req->fence.seqno);
-	intel_ring_emit(engine, MI_USER_INTERRUPT);
-	__intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, req->fence.seqno);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	__intel_engine_submit(req->engine);
 
 	return 0;
 }
 
@@ -1787,20 +1795,20 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 length,
 			 unsigned dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine,
+	intel_ring_emit(ring,
 			MI_BATCH_BUFFER_START |
 			MI_BATCH_GTT |
 			(dispatch_flags & I915_DISPATCH_SECURE ?
 			 0 : MI_BATCH_NON_SECURE_I965));
-	intel_ring_emit(engine, offset);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
@@ -1814,8 +1822,8 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->engine;
-	u32 cs_offset = engine->scratch.gtt_offset;
+	struct intel_ringbuffer *ring = req->ringbuf;
+	u32 cs_offset = req->engine->scratch.gtt_offset;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
@@ -1823,13 +1831,13 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 		return ret;
 
 	/* Evict the invalid PTE TLBs */
-	intel_ring_emit(engine, COLOR_BLT_CMD | BLT_WRITE_RGBA);
-	intel_ring_emit(engine, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
-	intel_ring_emit(engine, I830_TLB_ENTRIES << 16 | 4); /* load each page */
-	intel_ring_emit(engine, cs_offset);
-	intel_ring_emit(engine, 0xdeadbeef);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
+	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
+	intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
+	intel_ring_emit(ring, cs_offset);
+	intel_ring_emit(ring, 0xdeadbeef);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
 		if (len > I830_BATCH_LIMIT)
@@ -1843,17 +1851,17 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 		 * stable batch scratch bo area (so that the CS never
 		 * stumbles over its tlb invalidation bug) ...
 		 */
-		intel_ring_emit(engine, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
-		intel_ring_emit(engine,
+		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
+		intel_ring_emit(ring,
 				BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
-		intel_ring_emit(engine, DIV_ROUND_UP(len, 4096) << 16 | 4096);
-		intel_ring_emit(engine, cs_offset);
-		intel_ring_emit(engine, 4096);
-		intel_ring_emit(engine, offset);
+		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
+		intel_ring_emit(ring, cs_offset);
+		intel_ring_emit(ring, 4096);
+		intel_ring_emit(ring, offset);
 
-		intel_ring_emit(engine, MI_FLUSH);
-		intel_ring_emit(engine, MI_NOOP);
-		intel_ring_advance(engine);
+		intel_ring_emit(ring, MI_FLUSH);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
 
 		/* ... and execute it. */
 		offset = cs_offset;
@@ -1863,10 +1871,10 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-					  0 : MI_BATCH_NON_SECURE));
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+					0 : MI_BATCH_NON_SECURE));
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
@@ -1876,17 +1884,17 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-					  0 : MI_BATCH_NON_SECURE));
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+					0 : MI_BATCH_NON_SECURE));
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
@@ -2418,8 +2426,9 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->engine;
-	int num_dwords = (engine->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+	struct intel_ringbuffer *ring = req->ringbuf;
+	int num_dwords =
+		(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
 	int ret;
 
 	if (num_dwords == 0)
@@ -2431,9 +2440,9 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 		return ret;
 
 	while (num_dwords--)
-		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_emit(ring, MI_NOOP);
 
-	intel_ring_advance(engine);
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
@@ -2524,7 +2533,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 			       u32 invalidate, u32 flush)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	uint32_t cmd;
 	int ret;
 
@@ -2552,17 +2561,16 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 	if (invalidate & I915_GEM_GPU_DOMAINS)
 		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
-	intel_ring_emit(engine, cmd);
-	intel_ring_emit(engine,
-			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
 	if (INTEL_GEN(req->i915) >= 8) {
-		intel_ring_emit(engine, 0); /* upper addr */
-		intel_ring_emit(engine, 0); /* value */
+		intel_ring_emit(ring, 0); /* upper addr */
+		intel_ring_emit(ring, 0); /* value */
 	} else {
-		intel_ring_emit(engine, 0);
-		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, MI_NOOP);
 	}
-	intel_ring_advance(engine);
+	intel_ring_advance(ring);
 	return 0;
 }
 
@@ -2571,8 +2579,8 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->engine;
-	bool ppgtt = USES_PPGTT(engine->dev) &&
+	struct intel_ringbuffer *ring = req->ringbuf;
+	bool ppgtt = USES_PPGTT(req->i915) &&
 			!(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
@@ -2581,13 +2589,13 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 		return ret;
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_ring_emit(engine, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
 			(dispatch_flags & I915_DISPATCH_RS ?
 			 MI_BATCH_RESOURCE_STREAMER : 0));
-	intel_ring_emit(engine, lower_32_bits(offset));
-	intel_ring_emit(engine, upper_32_bits(offset));
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, lower_32_bits(offset));
+	intel_ring_emit(ring, upper_32_bits(offset));
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
@@ -2597,22 +2605,22 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			     u64 offset, u32 len,
 			     unsigned dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine,
+	intel_ring_emit(ring,
 			MI_BATCH_BUFFER_START |
 			(dispatch_flags & I915_DISPATCH_SECURE ?
 			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
 			(dispatch_flags & I915_DISPATCH_RS ?
 			 MI_BATCH_RESOURCE_STREAMER : 0));
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(engine, offset);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
@@ -2622,20 +2630,20 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine,
+	intel_ring_emit(ring,
 			MI_BATCH_BUFFER_START |
 			(dispatch_flags & I915_DISPATCH_SECURE ?
 			 0 : MI_BATCH_NON_SECURE_I965));
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(engine, offset);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
@@ -2645,7 +2653,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 static int gen6_ring_flush(struct drm_i915_gem_request *req,
 			   u32 invalidate, u32 flush)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	uint32_t cmd;
 	int ret;
 
@@ -2672,17 +2680,17 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
 	 */
 	if (invalidate & I915_GEM_DOMAIN_RENDER)
 		cmd |= MI_INVALIDATE_TLB;
-	intel_ring_emit(engine, cmd);
-	intel_ring_emit(engine,
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring,
 			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
 	if (INTEL_GEN(req->i915) >= 8) {
-		intel_ring_emit(engine, 0); /* upper addr */
-		intel_ring_emit(engine, 0); /* value */
+		intel_ring_emit(ring, 0); /* upper addr */
+		intel_ring_emit(ring, 0); /* value */
 	} else {
-		intel_ring_emit(engine, 0);
-		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, MI_NOOP);
 	}
-	intel_ring_advance(engine);
+	intel_ring_advance(ring);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 9a0a02653039..4f4b8ea4df82 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -454,32 +454,21 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 
 int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
 int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
 
-static inline void __intel_ringbuffer_emit(struct intel_ringbuffer *rb,
-					   u32 data)
+static inline void intel_ring_emit(struct intel_ringbuffer *ring, u32 data)
 {
-	*(uint32_t *)(rb->vaddr + rb->tail) = data;
-	rb->tail += 4;
+	*(uint32_t *)(ring->vaddr + ring->tail) = data;
+	ring->tail += 4;
 }
 
-static inline void __intel_ringbuffer_advance(struct intel_ringbuffer *rb)
-{
-	rb->tail &= rb->size - 1;
-}
-
-static inline void intel_ring_emit(struct intel_engine_cs *engine, u32 data)
-{
-	__intel_ringbuffer_emit(engine->buffer, data);
-}
-
-static inline void intel_ring_emit_reg(struct intel_engine_cs *engine,
+static inline void intel_ring_emit_reg(struct intel_ringbuffer *ring,
 				       i915_reg_t reg)
 {
-	intel_ring_emit(engine, i915_mmio_reg_offset(reg));
+	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
}
 
-static inline void intel_ring_advance(struct intel_engine_cs *engine)
+static inline void intel_ring_advance(struct intel_ringbuffer *ring)
 {
-	__intel_ringbuffer_advance(engine->buffer);
+	ring->tail &= ring->size - 1;
 }
 
 int __intel_ring_space(int head, int tail, int size);
-- 
2.8.1
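
A quick usage note on the unified emitters: a caller now pulls the ringbuffer out of the request, reserves space with intel_ring_begin(), writes dwords with intel_ring_emit(), and closes with intel_ring_advance(). A minimal sketch following that pattern (emit_two_dwords() is a hypothetical function for illustration, not part of this patch):

static int emit_two_dwords(struct drm_i915_gem_request *req)
{
	struct intel_ringbuffer *ring = req->ringbuf;
	int ret;

	/* Reserve space for two dwords on the request's ring. */
	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	/* The emitters take the ringbuffer now, not the engine. */
	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);

	/* Wrap the tail back into the ring. */
	intel_ring_advance(ring);

	return 0;
}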