At the start of request emission, we flush some space for the request,
estimating the typical size for the request body. The common tail is now
much larger than the typical body, so we can shrink the flush
substantially.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/intel_lrc.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 874c2515f9d4..ec2522243b5e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -228,6 +228,9 @@ enum {
 #define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT	0x17
 #define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT	0x26
 
+/* Typical size of the average request (2 pipecontrols and a MI_BB) */
+#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
+
 static int execlists_context_deferred_alloc(struct intel_context *ctx,
 					    struct intel_engine_cs *engine);
 static int intel_lr_context_pin(struct intel_context *ctx,
@@ -681,7 +684,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 	 * we start building the request - in which case we will just
 	 * have to repeat work.
 	 */
-	request->reserved_space += MIN_SPACE_FOR_ADD_REQUEST;
+	request->reserved_space += EXECLISTS_REQUEST_SIZE;
 
 	if (request->ctx->engine[engine->id].state == NULL) {
 		ret = execlists_context_deferred_alloc(request->ctx, engine);
@@ -727,7 +730,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 	 * to cancel/unwind this request now.
 	 */
 
-	request->reserved_space -= MIN_SPACE_FOR_ADD_REQUEST;
+	request->reserved_space -= EXECLISTS_REQUEST_SIZE;
 	return 0;
 
 err_unpin:
-- 
2.8.1
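
For reference, the reserved_space pattern the patch tunes can be modelled in a
few lines of plain C. The sketch below is illustrative only (every struct and
helper name in it is invented, and it is not the driver implementation); it
shows why holding back a fixed per-request budget at allocation bounds the
flush needed before emission starts, and why shrinking that budget to the
typical tail size reduces the up-front flush.

/* Illustrative userspace model of the reserve/release pattern; not driver code. */
#include <assert.h>
#include <stdio.h>

#define EXECLISTS_REQUEST_SIZE 64 /* bytes held back per request */

struct fake_ring {
	unsigned int size;           /* total ring space */
	unsigned int used;           /* bytes already emitted */
	unsigned int reserved_space; /* kept back for the request tail */
};

/* Reserve the typical tail size before any commands are emitted. */
static int fake_request_alloc(struct fake_ring *ring)
{
	ring->reserved_space += EXECLISTS_REQUEST_SIZE;
	if (ring->used + ring->reserved_space > ring->size)
		return -1; /* the real driver would wait/flush here */
	return 0;
}

/* Emit bytes, never eating into the space held back for the tail. */
static int fake_emit(struct fake_ring *ring, unsigned int bytes)
{
	if (ring->used + bytes + ring->reserved_space > ring->size)
		return -1;
	ring->used += bytes;
	return 0;
}

/* Release the reservation once committed to writing the request tail. */
static void fake_request_commit(struct fake_ring *ring)
{
	ring->reserved_space -= EXECLISTS_REQUEST_SIZE;
}

int main(void)
{
	struct fake_ring ring = { .size = 4096 };

	assert(fake_request_alloc(&ring) == 0);
	assert(fake_emit(&ring, 256) == 0);      /* request body */
	fake_request_commit(&ring);
	assert(fake_emit(&ring, EXECLISTS_REQUEST_SIZE) == 0); /* the tail itself */

	printf("used %u of %u bytes\n", ring.used, ring.size);
	return 0;
}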