On 11/05/2017 20:59, Chris Wilson wrote:
The i915_priolist nodes are allocated within an atomic context on a path
where we wish to minimise latency. If we use a dedicated kmem_cache, we
have the advantage of a local freelist from which to service new
requests, which should keep the latency impact of an allocation small.
Though currently we expect the majority of requests to be at default
priority (and so hit the preallocated priolist), once userspace starts
using priorities they are likely to use many fine-grained policies,
improving the utilisation of a private slab.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Cc: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_drv.h            | 1 +
 drivers/gpu/drm/i915/i915_gem.c            | 9 ++++++++-
 drivers/gpu/drm/i915/i915_guc_submission.c | 2 +-
 drivers/gpu/drm/i915/intel_lrc.c           | 4 ++--
 4 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ff3574a56812..7f192efbe088 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2027,6 +2027,7 @@ struct drm_i915_private {
 	struct kmem_cache *vmas;
 	struct kmem_cache *requests;
 	struct kmem_cache *dependencies;
+	struct kmem_cache *priorities;
 
 	const struct intel_device_info info;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 75d7575b81f4..f52d72b1a5b4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4871,12 +4871,16 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
 	if (!dev_priv->dependencies)
 		goto err_requests;
 
+	dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
+	if (!dev_priv->priorities)
+		goto err_dependencies;
+
 	mutex_lock(&dev_priv->drm.struct_mutex);
 	INIT_LIST_HEAD(&dev_priv->gt.timelines);
 	err = i915_gem_timeline_init__global(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	if (err)
-		goto err_dependencies;
+		goto err_priorities;
 
 	INIT_LIST_HEAD(&dev_priv->context_list);
 	INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
@@ -4900,6 +4904,8 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
 
 	return 0;
 
+err_priorities:
+	kmem_cache_destroy(dev_priv->priorities);
 err_dependencies:
 	kmem_cache_destroy(dev_priv->dependencies);
 err_requests:
@@ -4923,6 +4929,7 @@ void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
 	WARN_ON(!list_empty(&dev_priv->gt.timelines));
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
+	kmem_cache_destroy(dev_priv->priorities);
 	kmem_cache_destroy(dev_priv->dependencies);
 	kmem_cache_destroy(dev_priv->requests);
 	kmem_cache_destroy(dev_priv->vmas);
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 440bab856cc2..53c022c47687 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -704,7 +704,7 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine)
 		rb_erase(&p->node, &engine->execlist_queue);
 		INIT_LIST_HEAD(&p->requests);
 		if (p->priority != I915_PRIORITY_NORMAL)
-			kfree(p);
+			kmem_cache_free(engine->i915->priorities, p);
 	}
 
 done:
 	engine->execlist_first = rb;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 849b35796de7..7e41529bd074 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -500,7 +500,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		rb_erase(&p->node, &engine->execlist_queue);
 		INIT_LIST_HEAD(&p->requests);
 		if (p->priority != I915_PRIORITY_NORMAL)
-			kfree(p);
+			kmem_cache_free(engine->i915->priorities, p);
 	}
 
 done:
 	engine->execlist_first = rb;
@@ -659,7 +659,7 @@ insert_request(struct intel_engine_cs *engine,
 	if (prio == I915_PRIORITY_NORMAL) {
 		p = &engine->default_priolist;
 	} else {
-		p = kmalloc(sizeof(*p), GFP_ATOMIC);
+		p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
 		/* Convert an allocation failure to a priority bump */
 		if (unlikely(!p)) {
 			prio = I915_PRIORITY_NORMAL; /* recurses just once */
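
For reference, the slab lifecycle the patch wires up (create the cache once
at init, alloc/free on the latency-sensitive path, destroy on teardown) can
be sketched in isolation. This is a minimal, hypothetical module using
made-up names (toy_priolist, toy_cache, toy_alloc), not actual i915 code:

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>

/* Stand-in for i915_priolist; the layout only needs to be plausible. */
struct toy_priolist {
	struct rb_node node;
	struct list_head requests;
	int priority;
};

static struct kmem_cache *toy_cache;

/* Hot path: may be called under a spinlock, so the allocation must not
 * sleep. GFP_ATOMIC plus the cache's local freelist keeps latency low. */
static struct toy_priolist *toy_alloc(int priority)
{
	struct toy_priolist *p;

	p = kmem_cache_alloc(toy_cache, GFP_ATOMIC);
	if (!p)
		return NULL;

	INIT_LIST_HEAD(&p->requests);
	p->priority = priority;
	return p;
}

static int __init toy_init(void)
{
	struct toy_priolist *p;

	/* KMEM_CACHE() derives the cache's name and size from the struct;
	 * SLAB_HWCACHE_ALIGN keeps objects on separate cache lines. */
	toy_cache = KMEM_CACHE(toy_priolist, SLAB_HWCACHE_ALIGN);
	if (!toy_cache)
		return -ENOMEM;

	/* Exercise one alloc/free round trip. */
	p = toy_alloc(0);
	if (p)
		kmem_cache_free(toy_cache, p);
	return 0;
}

static void __exit toy_exit(void)
{
	kmem_cache_destroy(toy_cache);
}

module_init(toy_init);
module_exit(toy_exit);
MODULE_LICENSE("GPL");

The expected win over kmalloc(..., GFP_ATOMIC) is that objects freed back to
a dedicated cache sit on a per-cpu freelist, so the next atomic allocation
can usually be serviced without falling back to the page allocator.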
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>

Regards,

Tvrtko