On Tue, 2016-04-26 at 21:06 +0100, Chris Wilson wrote:
> Now that we share intel_ring_begin(), reserving space for the tail of
> the request is identical between legacy/execlists and so the tautology
> can be removed. In the process, we move the reserved space tracking
> from the ringbuffer on to the request. This is to enable us to reorder
> the reserved space allocation in the next patch.
> 
> Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
> Cc: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxxxxxxxx>

Some comments below.

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>

> ---
>  drivers/gpu/drm/i915/i915_drv.h         |  3 +++
>  drivers/gpu/drm/i915/i915_gem.c         | 21 ++++++++++------
>  drivers/gpu/drm/i915/intel_lrc.c        | 15 -----------
>  drivers/gpu/drm/i915/intel_ringbuffer.c | 44 +++------------------------------
>  drivers/gpu/drm/i915/intel_ringbuffer.h | 17 -------------
>  5 files changed, 19 insertions(+), 81 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 444a8ea0c5c4..831da9f43324 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -2278,6 +2278,9 @@ struct drm_i915_gem_request {
>          /** Position in the ringbuffer of the end of the whole request */
>          u32 tail;
> 
> +        /** Preallocate space in the ringbuffer for the emitting the request */
> +        u32 reserved_space;
> +
>          /**
>           * Context and ring buffer related to this request
>           * Contexts are refcounted, so when this request is associated with a
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 71135c3ce44e..0e27484bd28a 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2573,6 +2573,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
>          struct drm_i915_private *dev_priv;
>          struct intel_ringbuffer *ringbuf;
>          u32 request_start;
> +        u32 reserved_tail;
>          int ret;
> 
>          if (WARN_ON(request == NULL))
> @@ -2587,9 +2588,10 @@ void __i915_add_request(struct drm_i915_gem_request *request,
>           * should already have been reserved in the ring buffer. Let the ring
>           * know that it is time to use that space up.
>           */
> -        intel_ring_reserved_space_use(ringbuf);
> -
>          request_start = intel_ring_get_tail(ringbuf);
> +        reserved_tail = request->reserved_space;
> +        request->reserved_space = 0;
> +
>          /*
>           * Emit any outstanding flushes - execbuf can fail to emit the flush
>           * after having emitted the batchbuffer command. Hence we need to fix
> @@ -2653,7 +2655,13 @@ void __i915_add_request(struct drm_i915_gem_request *request,
>          intel_mark_busy(dev_priv->dev);
> 
>          /* Sanity check that the reserved size was large enough. */
> -        intel_ring_reserved_space_end(ringbuf);
> +        ret = intel_ring_get_tail(ringbuf) - request_start;
> +        if (ret < 0)
> +                ret += ringbuf->size;
> +        WARN_ONCE(ret > reserved_tail,
> +                  "Not enough space reserved (%d bytes) "
> +                  "for adding the request (%d bytes)\n",
> +                  reserved_tail, ret);

I'm not sure whether I'd prefer this to stay in an enclosed helper
function instead, so the ringbuf details do not spill into
__i915_add_request() (rough sketch at the end of this mail).

>  }
> 
>  static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
> @@ -2774,17 +2782,14 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
>           * to be redone if the request is not actually submitted straight
>           * away, e.g. because a GPU scheduler has deferred it.
>           */
> -        if (i915.enable_execlists)
> -                ret = intel_logical_ring_reserve_space(req);
> -        else
> -                ret = intel_ring_reserve_space(req);
> +        req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
> +        ret = intel_ring_begin(req, 0);
>          if (ret) {
>                  /*
>                   * At this point, the request is fully allocated even if not
>                   * fully prepared. Thus it can be cleaned up using the proper
>                   * free code.
>                   */
> -                intel_ring_reserved_space_cancel(req->ringbuf);

You could append a note to the above comment saying that the reserved
space will be released along with it.

>                  i915_gem_request_unreference(req);
>                  return ret;
>          }
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index ba87c94928e7..910044cf143e 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -772,21 +772,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
>          return 0;
>  }
> 
> -int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
> -{
> -        /*
> -         * The first call merely notes the reserve request and is common for
> -         * all back ends. The subsequent localised _begin() call actually
> -         * ensures that the reservation is available. Without the begin, if
> -         * the request creator immediately submitted the request without
> -         * adding any commands to it then there might not actually be
> -         * sufficient room for the submission commands.
> -         */
> -        intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
> -
> -        return intel_ring_begin(request, 0);
> -}
> -
>  /**
>   * execlists_submission() - submit a batchbuffer for execution, Execlists style
>   * @dev: DRM device.
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 1d3d2ea3e9bc..ba5946b9fa06 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -2336,44 +2336,6 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
>          return 0;
>  }
> 
> -int intel_ring_reserve_space(struct drm_i915_gem_request *request)
> -{
> -        /*
> -         * The first call merely notes the reserve request and is common for
> -         * all back ends. The subsequent localised _begin() call actually
> -         * ensures that the reservation is available. Without the begin, if
> -         * the request creator immediately submitted the request without
> -         * adding any commands to it then there might not actually be
> -         * sufficient room for the submission commands.
> -         */
> -        intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
> -
> -        return intel_ring_begin(request, 0);
> -}
> -
> -void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
> -{
> -        GEM_BUG_ON(ringbuf->reserved_size);
> -        ringbuf->reserved_size = size;
> -}
> -
> -void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
> -{
> -        GEM_BUG_ON(!ringbuf->reserved_size);
> -        ringbuf->reserved_size = 0;
> -}
> -
> -void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
> -{
> -        GEM_BUG_ON(!ringbuf->reserved_size);
> -        ringbuf->reserved_size = 0;
> -}
> -
> -void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
> -{
> -        GEM_BUG_ON(ringbuf->reserved_size);
> -}
> -
>  static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
>  {
>          struct intel_ringbuffer *ringbuf = req->ringbuf;
> @@ -2385,7 +2347,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
>                  return 0;
> 
>          /* The whole point of reserving space is to not wait! */
> -        GEM_BUG_ON(!ringbuf->reserved_size);
> +        GEM_BUG_ON(!req->reserved_space);
> 
>          list_for_each_entry(target, &engine->request_list, list) {
>                  unsigned space;
> @@ -2420,7 +2382,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
>          int total_bytes, wait_bytes;
>          bool need_wrap = false;
> 
> -        total_bytes = bytes + ringbuf->reserved_size;
> +        total_bytes = bytes + req->reserved_space;
> 
>          if (unlikely(bytes > remain_usable)) {
>                  /*
> @@ -2436,7 +2398,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
>                   * and only need to effectively wait for the reserved
>                   * size space from the start of ringbuffer.
>                   */
> -                wait_bytes = remain_actual + ringbuf->reserved_size;
> +                wait_bytes = remain_actual + req->reserved_space;
>          } else
>                  /* No wrapping required, just waiting. */
>                  wait_bytes = total_bytes;
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 201e7752d765..038914ccc6fd 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -107,7 +107,6 @@ struct intel_ringbuffer {
>          int space;
>          int size;
>          int effective_size;
> -        int reserved_size;
> 
>          /** We track the position of the requests in the ring buffer, and
>           * when each is retired we increment last_retired_head as the GPU
> @@ -491,20 +490,4 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
>   */
>  #define MIN_SPACE_FOR_ADD_REQUEST 160
> 
> -/*
> - * Reserve space in the ring to guarantee that the i915_add_request() call
> - * will always have sufficient room to do its stuff. The request creation
> - * code calls this automatically.
> - */
> -void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
> -/* Cancel the reservation, e.g. because the request is being discarded. */
> -void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
> -/* Use the reserved space - for use by i915_add_request() only. */
> -void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
> -/* Finish with the reserved space - for use by i915_add_request() only. */
> -void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
> -
> -/* Legacy ringbuffer specific portion of reservation code: */
> -int intel_ring_reserve_space(struct drm_i915_gem_request *request);
> -
>  #endif /* _INTEL_RINGBUFFER_H_ */

-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
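P.S. For reference, a rough sketch of the kind of enclosed helper I had
in mind for the sanity check; the name check_reserved_space() and its
exact placement are only illustrative, not part of the patch. It wraps
the same tail arithmetic and WARN_ONCE() from the hunk above so that
__i915_add_request() does not poke at the ringbuf internals directly:

        static void check_reserved_space(struct intel_ringbuffer *ringbuf,
                                         u32 request_start, u32 reserved)
        {
                /* Bytes emitted between the start of request emission and
                 * the current tail, fixed up in case the ring wrapped in
                 * between.
                 */
                int used = intel_ring_get_tail(ringbuf) - request_start;

                if (used < 0)
                        used += ringbuf->size;

                /* Sanity check that the reserved size was large enough. */
                WARN_ONCE(used > reserved,
                          "Not enough space reserved (%d bytes) "
                          "for adding the request (%d bytes)\n",
                          reserved, used);
        }

__i915_add_request() would then just end with
check_reserved_space(ringbuf, request_start, reserved_tail);, keeping
the ring size/tail details in one place.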