Now there's the curious question: where will the split into top/bottom halves be?
> - */ > - return intel_ring_invalidate_all_caches(ring); > + return 0; > } > > static bool > @@ -1123,14 +1120,6 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file, > } > } > > - ret = i915_gem_execbuffer_move_to_gpu(ring, vmas); > - if (ret) > - goto error; > - > - ret = i915_switch_context(ring, ctx); > - if (ret) > - goto error; > - > instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK; > instp_mask = I915_EXEC_CONSTANTS_MASK; > switch (instp_mode) { > @@ -1168,6 +1157,28 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file, > goto error; > } > > + ret = i915_gem_execbuffer_move_to_gpu(ring, vmas); > + if (ret) > + goto error; > + > + i915_gem_execbuffer_move_to_active(vmas, ring); > + > + /* To be split into two functions here... */ > + > + intel_runtime_pm_get(dev_priv); > + > + /* Unconditionally invalidate gpu caches and ensure that we do flush > + * any residual writes from the previous batch. > + */ > + ret = intel_ring_invalidate_all_caches(ring); > + if (ret) > + goto error; > + > + /* Switch to the correct context for the batch */ > + ret = i915_switch_context(ring, ctx); > + if (ret) > + goto error; > + > if (ring == &dev_priv->ring[RCS] && > instp_mode != dev_priv->relative_constants_mode) { > ret = intel_ring_begin(ring, 4); > @@ -1208,15 +1219,18 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file, > exec_start, exec_len, > dispatch_flags); > if (ret) > - return ret; > + goto error; > } > > trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags); > > - i915_gem_execbuffer_move_to_active(vmas, ring); > i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); > > error: > + /* intel_gpu_busy should also get a ref, so it will free when the device > + * is really idle. 
*/ > + intel_runtime_pm_put(dev_priv); > + > kfree(cliprects); > return ret; > } > @@ -1335,8 +1349,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, > return -EINVAL; > } > > - intel_runtime_pm_get(dev_priv); > - > ret = i915_mutex_lock_interruptible(dev); > if (ret) > goto pre_mutex_err; > @@ -1467,9 +1479,6 @@ err: > mutex_unlock(&dev->struct_mutex); > > pre_mutex_err: > - /* intel_gpu_busy should also get a ref, so it will free when the device > - * is really idle. */ > - intel_runtime_pm_put(dev_priv); > return ret; > } > > diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c > index 037cbd5..f16b15d 100644 > --- a/drivers/gpu/drm/i915/intel_lrc.c > +++ b/drivers/gpu/drm/i915/intel_lrc.c > @@ -630,10 +630,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf, > if (flush_domains & I915_GEM_DOMAIN_GTT) > wmb(); > > - /* Unconditionally invalidate gpu caches and ensure that we do flush > - * any residual writes from the previous batch. > - */ > - return logical_ring_invalidate_all_caches(ringbuf); > + return 0; > } > > /** > @@ -717,6 +714,17 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file, > if (ret) > return ret; > > + i915_gem_execbuffer_move_to_active(vmas, ring); > + > + /* To be split into two functions here... */ > + > + /* Unconditionally invalidate gpu caches and ensure that we do flush > + * any residual writes from the previous batch. 
> + */ > + ret = logical_ring_invalidate_all_caches(ringbuf); > + if (ret) > + return ret; > + > if (ring == &dev_priv->ring[RCS] && > instp_mode != dev_priv->relative_constants_mode) { > ret = intel_logical_ring_begin(ringbuf, 4); > @@ -738,7 +746,6 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file, > > trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags); > > - i915_gem_execbuffer_move_to_active(vmas, ring); > i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); > > return 0; > -- > 1.7.9.5 > > _______________________________________________ > Intel-gfx mailing list > Intel-gfx@xxxxxxxxxxxxxxxxxxxxx > http://lists.freedesktop.org/mailman/listinfo/intel-gfx -- Daniel Vetter Software Engineer, Intel Corporation +41 (0) 79 365 57 48 - http://blog.ffwll.ch _______________________________________________ Intel-gfx mailing list Intel-gfx@xxxxxxxxxxxxxxxxxxxxx http://lists.freedesktop.org/mailman/listinfo/intel-gfx