Push the ring creation flags from the outer GEM context to the inner
intel_context to avoid an unsightly back-reference from inside the
backend.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Reviewed-by: Andi Shyti <andi.shyti@xxxxxxxxx>
---
 drivers/gpu/drm/i915/gem/i915_gem_context.c   | 52 ++++++++++++++-----
 .../gpu/drm/i915/gem/i915_gem_context_types.h |  3 --
 drivers/gpu/drm/i915/gt/intel_context.c       |  1 +
 drivers/gpu/drm/i915/gt/intel_context.h       |  5 ++
 drivers/gpu/drm/i915/gt/intel_engine_cs.c     |  2 +
 drivers/gpu/drm/i915/gt/intel_lrc.c           |  5 +-
 drivers/gpu/drm/i915/gt/intel_ringbuffer.c    |  2 +-
 drivers/gpu/drm/i915/gt/mock_engine.c         |  2 +-
 drivers/gpu/drm/i915/i915_debugfs.c           | 23 +++++---
 9 files changed, 65 insertions(+), 30 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index fae8ca72e240..658604f44620 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -436,8 +436,6 @@ __create_context(struct drm_i915_private *i915)
 	i915_gem_context_set_bannable(ctx);
 	i915_gem_context_set_recoverable(ctx);
 
-	ctx->ring_size = 4 * PAGE_SIZE;
-
 	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
 		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
 
@@ -448,22 +446,34 @@ __create_context(struct drm_i915_private *i915)
 	return ERR_PTR(err);
 }
 
+static void
+context_apply_all(struct i915_gem_context *ctx,
+		  void (*fn)(struct intel_context *ce, void *data),
+		  void *data)
+{
+	struct i915_gem_engines_iter it;
+	struct intel_context *ce;
+
+	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
+		fn(ce, data);
+	i915_gem_context_unlock_engines(ctx);
+}
+
+static void __apply_ppgtt(struct intel_context *ce, void *vm)
+{
+	i915_vm_put(ce->vm);
+	ce->vm = i915_vm_get(vm);
+}
+
 static struct i915_address_space *
 __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
 {
 	struct i915_address_space *old = ctx->vm;
-	struct i915_gem_engines_iter it;
-	struct intel_context *ce;
 
 	GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
 
 	ctx->vm = i915_vm_get(vm);
-
-	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
-		i915_vm_put(ce->vm);
-		ce->vm = i915_vm_get(vm);
-	}
-	i915_gem_context_unlock_engines(ctx);
+	context_apply_all(ctx, __apply_ppgtt, vm);
 
 	return old;
 }
@@ -529,6 +539,11 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
 	return ctx;
 }
 
+static void __apply_ring_size(struct intel_context *ce, void *data)
+{
+	ce->ring = data;
+}
+
 /**
  * i915_gem_context_create_gvt - create a GVT GEM context
  * @dev: drm device *
@@ -543,6 +558,7 @@ struct i915_gem_context *
 i915_gem_context_create_gvt(struct drm_device *dev)
 {
 	struct i915_gem_context *ctx;
+	unsigned long ring_size;
 	int ret;
 
 	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
@@ -567,8 +583,14 @@ i915_gem_context_create_gvt(struct drm_device *dev)
 	i915_gem_context_set_closed(ctx); /* not user accessible */
 	i915_gem_context_clear_bannable(ctx);
 	i915_gem_context_set_force_single_submission(ctx);
+
+	ring_size = SZ_4K;
 	if (!USES_GUC_SUBMISSION(to_i915(dev)))
-		ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
+		ring_size = 512 * SZ_4K; /* max */
+
+	context_apply_all(ctx,
+			  __apply_ring_size,
+			  __intel_context_ring_size(ring_size));
 
 	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
 out:
@@ -607,7 +629,6 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
 
 	i915_gem_context_clear_bannable(ctx);
 	ctx->sched.priority = I915_USER_PRIORITY(prio);
-	ctx->ring_size = PAGE_SIZE;
 
 	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
 
@@ -1591,6 +1612,7 @@ set_engines(struct i915_gem_context *ctx,
 	for (n = 0; n < num_engines; n++) {
 		struct i915_engine_class_instance ci;
 		struct intel_engine_cs *engine;
+		struct intel_context *ce;
 
 		if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
 			__free_engines(set.engines, n);
@@ -1613,11 +1635,13 @@ set_engines(struct i915_gem_context *ctx,
 			return -ENOENT;
 		}
 
-		set.engines->engines[n] = intel_context_create(ctx, engine);
-		if (!set.engines->engines[n]) {
+		ce = intel_context_create(ctx, engine);
+		if (!ce) {
 			__free_engines(set.engines, n);
 			return -ENOMEM;
 		}
+
+		set.engines->engines[n] = ce;
 	}
 	set.engines->num_engines = num_engines;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index a02d98494078..260d59cc3de8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -169,9 +169,6 @@ struct i915_gem_context {
 
 	struct i915_sched_attr sched;
 
-	/** ring_size: size for allocating the per-engine ring buffer */
-	u32 ring_size;
-
 	/** guilty_count: How many times this context has caused a GPU hang. */
 	atomic_t guilty_count;
 	/**
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 41d38e661de7..6d1d4e8dbfc9 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -222,6 +222,7 @@ intel_context_init(struct intel_context *ce,
 	ce->engine = engine;
 	ce->ops = engine->cops;
 	ce->sseu = engine->sseu;
+	ce->ring = __intel_context_ring_size(SZ_16K);
 
 	INIT_LIST_HEAD(&ce->signal_link);
 	INIT_LIST_HEAD(&ce->signals);
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 07f9924de48f..13f28dd316bc 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -136,4 +136,9 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
 
 struct i915_request *intel_context_create_request(struct intel_context *ce);
 
+static inline struct intel_ring *__intel_context_ring_size(u64 sz)
+{
+	return u64_to_ptr(struct intel_ring, sz);
+}
+
 #endif /* __INTEL_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index feb9d1178d24..22e8943cf161 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -718,6 +718,8 @@ static int pin_context(struct i915_gem_context *ctx,
 	if (IS_ERR(ce))
 		return PTR_ERR(ce);
 
+	ce->ring = __intel_context_ring_size(SZ_4K);
+
 	err = intel_context_pin(ce);
 	intel_context_put(ce);
 	if (err)
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index ea66713ae94c..6a3015176714 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -3116,9 +3116,8 @@ static int __execlists_context_alloc(struct intel_context *ce,
 		goto error_deref_obj;
 	}
 
-	ring = intel_engine_create_ring(engine,
-					timeline,
-					ce->gem_context->ring_size);
+	ring = intel_engine_create_ring(engine, timeline,
+					(unsigned long)ce->ring);
 	intel_timeline_put(timeline);
 	if (IS_ERR(ring)) {
 		ret = PTR_ERR(ring);
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index e232c1998540..bb69f51ccbfa 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -2349,7 +2349,7 @@ int intel_ring_submission_init(struct intel_engine_cs *engine)
 	}
 	GEM_BUG_ON(timeline->has_initial_breadcrumb);
 
-	ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE);
+	ring = intel_engine_create_ring(engine, timeline, SZ_16K);
 	intel_timeline_put(timeline);
 	if (IS_ERR(ring)) {
 		err = PTR_ERR(ring);
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 2690922eed56..3aca88604d2d 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -140,7 +140,7 @@ static void mock_context_destroy(struct kref *ref)
 
 	GEM_BUG_ON(intel_context_is_pinned(ce));
 
-	if (ce->ring)
+	if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
 		mock_ring_free(ce->ring);
 
 	intel_context_fini(ce);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index f589da7f56f2..2d127eacdd5f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -309,10 +309,14 @@ static void print_context_stats(struct seq_file *m,
 
 		for_each_gem_engine(ce,
 				    i915_gem_context_lock_engines(ctx), it) {
-			if (ce->state)
-				per_file_stats(0, ce->state->obj, &kstats);
-			if (ce->ring)
+			intel_context_lock_pinned(ce);
+			if (intel_context_is_pinned(ce)) {
+				if (ce->state)
+					per_file_stats(0,
+						       ce->state->obj, &kstats);
 				per_file_stats(0, ce->ring->vma->obj, &kstats);
+			}
+			intel_context_unlock_pinned(ce);
 		}
 		i915_gem_context_unlock_engines(ctx);
 
@@ -1612,12 +1616,15 @@ static int i915_context_status(struct seq_file *m, void *unused)
 
 		for_each_gem_engine(ce,
 				    i915_gem_context_lock_engines(ctx), it) {
-			seq_printf(m, "%s: ", ce->engine->name);
-			if (ce->state)
-				describe_obj(m, ce->state->obj);
-			if (ce->ring)
+			intel_context_lock_pinned(ce);
+			if (intel_context_is_pinned(ce)) {
+				seq_printf(m, "%s: ", ce->engine->name);
+				if (ce->state)
+					describe_obj(m, ce->state->obj);
 				describe_ctx_ring(m, ce->ring);
-			seq_putc(m, '\n');
+				seq_putc(m, '\n');
+			}
+			intel_context_unlock_pinned(ce);
 		}
 		i915_gem_context_unlock_engines(ctx);
 
-- 
2.23.0.rc1
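
For readers new to the u64_to_ptr() trick used above: until a backend actually
allocates the ring, ce->ring now carries the requested ring size encoded in the
pointer value, which intel_engine_create_ring() later recovers with a plain
cast. The standalone userspace sketch below illustrates only that encoding; the
struct layout, helper name and main() scaffolding are simplified stand-ins for
the i915 ones, not kernel code.

/*
 * Minimal sketch of the size-in-a-pointer encoding: before the backend
 * allocates a real ring, ce->ring holds the requested size stuffed into
 * the pointer bits.  Types and names are hypothetical stand-ins.
 */
#include <stdio.h>
#include <stdint.h>

struct intel_ring;			/* opaque until really allocated */

struct intel_context {
	struct intel_ring *ring;	/* encoded size before allocation */
};

/* Rough userspace equivalent of __intel_context_ring_size()/u64_to_ptr(). */
static inline struct intel_ring *context_ring_size(uint64_t sz)
{
	return (struct intel_ring *)(uintptr_t)sz;
}

int main(void)
{
	struct intel_context ce = {
		/* the SZ_16K default the patch sets in intel_context_init() */
		.ring = context_ring_size(16 * 1024),
	};

	/* what the backend does at ring-creation time: decode the size */
	unsigned long ring_size = (unsigned long)ce.ring;

	printf("requested ring size: %lu bytes\n", ring_size);
	return 0;
}

The kernel helper is essentially the same cast guarded by a type check; the
point of the patch is that nothing dereferences ce->ring until the backend has
replaced the encoded size with a real struct intel_ring.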