Allocate only an internal intel_context for the kernel_context, forgoing a global GEM context for internal use as we only require a separate address space (for our own protection). Now having weaned GT from requiring ce->gem_context, we can stop referencing it entirely. This also means we no longer have to create random and unnecessary GEM contexts for internal use. GEM contexts are now entirely for tracking GEM clients, and intel_context the execution environment on the GPU. Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx> --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 91 +++++------- drivers/gpu/drm/i915/gem/i915_gem_context.h | 6 +- .../gpu/drm/i915/gem/i915_gem_execbuffer.c | 3 +- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 6 +- .../drm/i915/gem/selftests/i915_gem_context.c | 5 +- .../gpu/drm/i915/gem/selftests/mock_context.c | 11 +- drivers/gpu/drm/i915/gt/intel_context.c | 32 +--- drivers/gpu/drm/i915/gt/intel_context.h | 4 +- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 21 +-- drivers/gpu/drm/i915/gt/intel_gt.c | 26 +++- drivers/gpu/drm/i915/gt/intel_gt_types.h | 7 + drivers/gpu/drm/i915/gt/intel_lrc.c | 18 +-- drivers/gpu/drm/i915/gt/intel_lrc.h | 6 +- drivers/gpu/drm/i915/gt/intel_reset.c | 6 +- .../gpu/drm/i915/gt/intel_ring_submission.c | 4 +- drivers/gpu/drm/i915/gt/selftest_context.c | 59 +++----- .../drm/i915/gt/selftest_engine_heartbeat.c | 3 +- drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 29 ++-- drivers/gpu/drm/i915/gt/selftest_lrc.c | 137 ++++++------------ drivers/gpu/drm/i915/gvt/scheduler.c | 16 +- drivers/gpu/drm/i915/i915_drv.h | 3 - drivers/gpu/drm/i915/i915_gem.c | 16 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 8 +- drivers/gpu/drm/i915/i915_gpu_error.c | 8 +- drivers/gpu/drm/i915/i915_perf.c | 3 - drivers/gpu/drm/i915/i915_request.c | 5 +- drivers/gpu/drm/i915/selftests/i915_request.c | 6 +- .../gpu/drm/i915/selftests/mock_gem_device.c | 8 +- 28 files changed, 216 insertions(+), 331 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 455690c80d42..ff9e64c513e7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -170,6 +170,26 @@ lookup_user_engine(struct i915_gem_context *ctx, return i915_gem_context_get_engine(ctx, idx); } +static void intel_context_set_gem(struct intel_context *ce, + struct i915_gem_context *ctx) +{ + ce->gem_context = ctx; + + ce->ring = __intel_context_ring_size(SZ_16K); + + if (ctx->vm) { + i915_vm_put(ce->vm); + ce->vm = i915_vm_get(ctx->vm); + } + + if (ctx->timeline) + ce->timeline = intel_timeline_get(ctx->timeline); + + if (ctx->sched.priority >= I915_PRIORITY_NORMAL && + intel_engine_has_semaphores(ce->engine)) + __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags); +} + static void __free_engines(struct i915_gem_engines *e, unsigned int count) { while (count--) { @@ -212,12 +232,14 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx) GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES); GEM_BUG_ON(e->engines[engine->legacy_idx]); - ce = intel_context_create(ctx, engine); + ce = intel_context_create(engine); if (IS_ERR(ce)) { __free_engines(e, e->num_engines + 1); return ERR_CAST(ce); } + intel_context_set_gem(ce, ctx); + e->engines[engine->legacy_idx] = ce; e->num_engines = max(e->num_engines, engine->legacy_idx); } @@ -634,37 +656,6 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags) return ctx; } -static void -destroy_kernel_context(struct 
i915_gem_context **ctxp) -{ - struct i915_gem_context *ctx; - - /* Keep the context ref so that we can free it immediately ourselves */ - ctx = i915_gem_context_get(fetch_and_zero(ctxp)); - GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); - - context_close(ctx); - i915_gem_context_free(ctx); -} - -struct i915_gem_context * -i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio) -{ - struct i915_gem_context *ctx; - - ctx = i915_gem_create_context(i915, 0); - if (IS_ERR(ctx)) - return ctx; - - i915_gem_context_clear_bannable(ctx); - i915_gem_context_set_persistence(ctx); - ctx->sched.priority = I915_USER_PRIORITY(prio); - - GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); - - return ctx; -} - static void init_contexts(struct i915_gem_contexts *gc) { spin_lock_init(&gc->lock); @@ -674,32 +665,12 @@ static void init_contexts(struct i915_gem_contexts *gc) init_llist_head(&gc->free_list); } -int i915_gem_init_contexts(struct drm_i915_private *i915) +void i915_gem_init_contexts(struct drm_i915_private *i915) { - struct i915_gem_context *ctx; - - /* Reassure ourselves we are only called once */ - GEM_BUG_ON(i915->kernel_context); - init_contexts(&i915->gem.contexts); - - /* lowest priority; idle task */ - ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MIN); - if (IS_ERR(ctx)) { - DRM_ERROR("Failed to create default global context\n"); - return PTR_ERR(ctx); - } - i915->kernel_context = ctx; - DRM_DEBUG_DRIVER("%s context support initialized\n", DRIVER_CAPS(i915)->has_logical_contexts ? "logical" : "fake"); - return 0; -} - -void i915_gem_driver_release__contexts(struct drm_i915_private *i915) -{ - destroy_kernel_context(&i915->kernel_context); } static int context_idr_cleanup(int id, void *p, void *data) @@ -1457,12 +1428,14 @@ set_engines__load_balance(struct i915_user_extension __user *base, void *data) } } - ce = intel_execlists_create_virtual(set->ctx, siblings, n); + ce = intel_execlists_create_virtual(siblings, n); if (IS_ERR(ce)) { err = PTR_ERR(ce); goto out_siblings; } + intel_context_set_gem(ce, set->ctx); + if (cmpxchg(&set->engines->engines[idx], NULL, ce)) { intel_context_put(ce); err = -EEXIST; @@ -1632,12 +1605,14 @@ set_engines(struct i915_gem_context *ctx, return -ENOENT; } - ce = intel_context_create(ctx, engine); + ce = intel_context_create(engine); if (IS_ERR(ce)) { __free_engines(set.engines, n); return PTR_ERR(ce); } + intel_context_set_gem(ce, ctx); + set.engines->engines[n] = ce; } set.engines->num_engines = num_engines; @@ -1938,13 +1913,15 @@ static int clone_engines(struct i915_gem_context *dst, */ if (intel_engine_is_virtual(engine)) clone->engines[n] = - intel_execlists_clone_virtual(dst, engine); + intel_execlists_clone_virtual(engine); else - clone->engines[n] = intel_context_create(dst, engine); + clone->engines[n] = intel_context_create(engine); if (IS_ERR_OR_NULL(clone->engines[n])) { __free_engines(clone, n); goto err_unlock; } + + intel_context_set_gem(clone->engines[n], dst); } clone->num_engines = n; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h index 69932899803e..d99fff481d59 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h @@ -115,8 +115,7 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx) } /* i915_gem_context.c */ -int __must_check i915_gem_init_contexts(struct drm_i915_private *i915); -void i915_gem_driver_release__contexts(struct drm_i915_private *i915); +void i915_gem_init_contexts(struct 
drm_i915_private *i915); int i915_gem_context_open(struct drm_i915_private *i915, struct drm_file *file); @@ -140,9 +139,6 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data, struct drm_file *file); -struct i915_gem_context * -i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio); - static inline struct i915_gem_context * i915_gem_context_get(struct i915_gem_context *ctx) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 6dc86c129eee..4f6b847ea011 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -1261,8 +1261,7 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb, * memory location. */ if (cache->gen >= 12) - ce = intel_context_create(eb->context->gem_context, - eb->engine); + ce = intel_context_create(eb->engine); else ce = intel_context_get(eb->context); if (IS_ERR(ce)) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 1e045c337044..432f2828b093 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -753,15 +753,11 @@ i915_gem_userptr_ioctl(struct drm_device *dev, return -EFAULT; if (args->flags & I915_USERPTR_READ_ONLY) { - struct i915_address_space *vm; - /* * On almost all of the older hw, we cannot tell the GPU that * a page is readonly. */ - vm = rcu_dereference_protected(dev_priv->kernel_context->vm, - true); /* static vm */ - if (!vm || !vm->has_read_only) + if (!dev_priv->gt.vm->has_read_only) return -ENODEV; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index c6e61564bb5e..6d174caa8f8b 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -309,7 +309,7 @@ static int live_parallel_switch(void *arg) if (!data[m].ce[0]) continue; - ce = intel_context_create(ctx, data[m].ce[0]->engine); + ce = intel_context_create(data[m].ce[0]->engine); if (IS_ERR(ce)) goto out; @@ -1230,8 +1230,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915, hweight32(engine->sseu.slice_mask), hweight32(pg_sseu.slice_mask)); - ce = intel_context_create(engine->kernel_context->gem_context, - engine); + ce = intel_context_create(engine); if (IS_ERR(ce)) { ret = PTR_ERR(ce); goto out_put; diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c index 29b8984f0e47..f472fc9240d5 100644 --- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c @@ -97,7 +97,16 @@ live_context(struct drm_i915_private *i915, struct drm_file *file) struct i915_gem_context * kernel_context(struct drm_i915_private *i915) { - return i915_gem_context_create_kernel(i915, I915_PRIORITY_NORMAL); + struct i915_gem_context *ctx; + + ctx = i915_gem_create_context(i915, 0); + if (IS_ERR(ctx)) + return ctx; + + GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); + i915_gem_context_clear_bannable(ctx); + + return ctx; } void kernel_context_close(struct i915_gem_context *ctx) diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 7075d03f508f..8b71b2ccbf27 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -31,8 +31,7 @@ void 
intel_context_free(struct intel_context *ce) } struct intel_context * -intel_context_create(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) +intel_context_create(struct intel_engine_cs *engine) { struct intel_context *ce; @@ -40,7 +39,7 @@ intel_context_create(struct i915_gem_context *ctx, if (!ce) return ERR_PTR(-ENOMEM); - intel_context_init(ce, ctx, engine); + intel_context_init(ce, engine); return ce; } @@ -72,8 +71,6 @@ int __intel_context_do_pin(struct intel_context *ce) ce->engine->name, ce->timeline->fence_context, ce->ring->head, ce->ring->tail); - i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */ - smp_mb__before_atomic(); /* flush pin before it is visible */ } @@ -103,7 +100,6 @@ void intel_context_unpin(struct intel_context *ce) ce->ops->unpin(ce); - i915_gem_context_put(ce->gem_context); intel_context_active_release(ce); } @@ -198,7 +194,7 @@ int intel_context_active_acquire(struct intel_context *ce) return err; /* Preallocate tracking nodes */ - if (!i915_gem_context_is_kernel(ce->gem_context)) { + if (ce->gem_context) { err = i915_active_acquire_preallocate_barrier(&ce->active, ce->engine); if (err) { @@ -219,33 +215,19 @@ void intel_context_active_release(struct intel_context *ce) void intel_context_init(struct intel_context *ce, - struct i915_gem_context *ctx, struct intel_engine_cs *engine) { - struct i915_address_space *vm; - GEM_BUG_ON(!engine->cops); + GEM_BUG_ON(!engine->gt->vm); kref_init(&ce->ref); - ce->gem_context = ctx; - rcu_read_lock(); - vm = rcu_dereference(ctx->vm); - if (vm) - ce->vm = i915_vm_get(vm); - else - ce->vm = i915_vm_get(&engine->gt->ggtt->vm); - rcu_read_unlock(); - if (ctx->timeline) - ce->timeline = intel_timeline_get(ctx->timeline); - if (ctx->sched.priority >= I915_PRIORITY_NORMAL && - intel_engine_has_semaphores(engine)) - __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags); - ce->engine = engine; ce->ops = engine->cops; ce->sseu = engine->sseu; - ce->ring = __intel_context_ring_size(SZ_16K); + ce->ring = __intel_context_ring_size(SZ_4K); + + ce->vm = i915_vm_get(engine->gt->vm); INIT_LIST_HEAD(&ce->signal_link); INIT_LIST_HEAD(&ce->signals); diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index d7b667a26e08..15bc46e1b308 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -18,13 +18,11 @@ #include "intel_timeline_types.h" void intel_context_init(struct intel_context *ce, - struct i915_gem_context *ctx, struct intel_engine_cs *engine); void intel_context_fini(struct intel_context *ce); struct intel_context * -intel_context_create(struct i915_gem_context *ctx, - struct intel_engine_cs *engine); +intel_context_create(struct intel_engine_cs *engine); void intel_context_free(struct intel_context *ce); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index efde3f8e0559..b38ea44ab761 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -756,12 +756,10 @@ create_kernel_context(struct intel_engine_cs *engine) struct intel_context *ce; int err; - ce = intel_context_create(engine->i915->kernel_context, engine); + ce = intel_context_create(engine); if (IS_ERR(ce)) return ce; - ce->ring = __intel_context_ring_size(SZ_4K); - err = intel_context_pin(ce); if (err) { intel_context_put(ce); @@ -797,6 +795,12 @@ int intel_engine_init_common(struct intel_engine_cs *engine) engine->set_default_submission(engine); + ret = 
measure_breadcrumb_dw(engine); + if (ret < 0) + return ret; + + engine->emit_fini_breadcrumb_dw = ret; + /* * We may need to do things with the shrinker which * require us to immediately switch back to the default @@ -811,18 +815,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine) engine->kernel_context = ce; - ret = measure_breadcrumb_dw(engine); - if (ret < 0) - goto err_unpin; - - engine->emit_fini_breadcrumb_dw = ret; - return 0; - -err_unpin: - intel_context_unpin(ce); - intel_context_put(ce); - return ret; } /** diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 4c26daf7ee46..f9fb0865fbb6 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -73,7 +73,6 @@ int intel_gt_init_hw(struct intel_gt *gt) struct intel_uncore *uncore = gt->uncore; int ret; - BUG_ON(!i915->kernel_context); ret = intel_gt_terminally_wedged(gt); if (ret) return ret; @@ -364,6 +363,14 @@ static void intel_gt_fini_scratch(struct intel_gt *gt) i915_vma_unpin_and_release(>->scratch, 0); } +static struct i915_address_space *kernel_vm(struct intel_gt *gt) +{ + if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING) + return &i915_ppgtt_create(gt->i915)->vm; + else + return i915_vm_get(>->ggtt->vm); +} + int intel_gt_init(struct intel_gt *gt) { int err; @@ -374,7 +381,17 @@ int intel_gt_init(struct intel_gt *gt) intel_gt_pm_init(gt); + gt->vm = kernel_vm(gt); + if (!gt->vm) { + err = -ENOMEM; + goto err_scratch; + } + return 0; + +err_scratch: + intel_gt_fini_scratch(gt); + return err; } void intel_gt_driver_remove(struct intel_gt *gt) @@ -389,6 +406,13 @@ void intel_gt_driver_unregister(struct intel_gt *gt) void intel_gt_driver_release(struct intel_gt *gt) { + struct i915_address_space *vm; + + vm = fetch_and_zero(>->vm); + if (!vm) /* FIXME being called twice on error paths :( */ + return; + + i915_vm_put(vm); intel_gt_pm_fini(gt); intel_gt_fini_scratch(gt); } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index d4e14dbd172e..96890dd12b5f 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -90,6 +90,13 @@ struct intel_gt { struct intel_engine_cs *engine[I915_NUM_ENGINES]; struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1] [MAX_ENGINE_INSTANCE + 1]; + + /* + * Default address space (either GGTT or ppGTT depending on arch). + * + * Reserved for exclusive use by the kernel. 
+ */ + struct i915_address_space *vm; }; enum intel_gt_scratch_field { diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 5b23aeb50385..242e6927675c 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -133,12 +133,11 @@ */ #include <linux/interrupt.h> -#include "gem/i915_gem_context.h" - #include "i915_drv.h" #include "i915_perf.h" #include "i915_trace.h" #include "i915_vgpu.h" +#include "intel_context.h" #include "intel_engine_pm.h" #include "intel_gt.h" #include "intel_gt_pm.h" @@ -4393,8 +4392,7 @@ virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal) } struct intel_context * -intel_execlists_create_virtual(struct i915_gem_context *ctx, - struct intel_engine_cs **siblings, +intel_execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count) { struct virtual_engine *ve; @@ -4405,13 +4403,13 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx, return ERR_PTR(-EINVAL); if (count == 1) - return intel_context_create(ctx, siblings[0]); + return intel_context_create(siblings[0]); ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL); if (!ve) return ERR_PTR(-ENOMEM); - ve->base.i915 = ctx->i915; + ve->base.i915 = siblings[0]->i915; ve->base.gt = siblings[0]->gt; ve->base.uncore = siblings[0]->uncore; ve->base.id = -1; @@ -4454,7 +4452,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx, virtual_submission_tasklet, (unsigned long)ve); - intel_context_init(&ve->context, ctx, &ve->base); + intel_context_init(&ve->context, &ve->base); for (n = 0; n < count; n++) { struct intel_engine_cs *sibling = siblings[n]; @@ -4535,14 +4533,12 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx, } struct intel_context * -intel_execlists_clone_virtual(struct i915_gem_context *ctx, - struct intel_engine_cs *src) +intel_execlists_clone_virtual(struct intel_engine_cs *src) { struct virtual_engine *se = to_virtual_engine(src); struct intel_context *dst; - dst = intel_execlists_create_virtual(ctx, - se->siblings, + dst = intel_execlists_create_virtual(se->siblings, se->num_siblings); if (IS_ERR(dst)) return dst; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h index faa2d56c279b..faa496ee71bf 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.h +++ b/drivers/gpu/drm/i915/gt/intel_lrc.h @@ -130,13 +130,11 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine, unsigned int max); struct intel_context * -intel_execlists_create_virtual(struct i915_gem_context *ctx, - struct intel_engine_cs **siblings, +intel_execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count); struct intel_context * -intel_execlists_clone_virtual(struct i915_gem_context *ctx, - struct intel_engine_cs *src); +intel_execlists_clone_virtual(struct intel_engine_cs *src); int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine, const struct intel_engine_cs *master, diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index b4c0ad4a6081..98e7bba5979e 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -86,6 +86,9 @@ static bool mark_guilty(struct i915_request *rq) bool banned; int i; + if (!ctx) + return false; + atomic_inc(&ctx->guilty_count); /* Cool contexts are too cool to be banned! (Used for reset testing.) 
*/ @@ -115,7 +118,8 @@ static bool mark_guilty(struct i915_request *rq) static void mark_innocent(struct i915_request *rq) { - atomic_inc(&rq->context->gem_context->active_count); + if (rq->context->gem_context) + atomic_inc(&rq->context->gem_context->active_count); } void __i915_request_reset(struct i915_request *rq, bool guilty) diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index 1c2f3c3a8840..6ddfe8a86dbe 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -1558,7 +1558,7 @@ static int remap_l3(struct i915_request *rq) struct i915_gem_context *ctx = rq->context->gem_context; int i, err; - if (!ctx->remap_slice) + if (!ctx || !ctx->remap_slice) return 0; for (i = 0; i < MAX_L3_SLICES; i++) { @@ -1601,7 +1601,7 @@ static int switch_context(struct i915_request *rq) * is purely used for flushing user contexts. */ hw_flags = 0; - if (i915_gem_context_is_kernel(rq->context->gem_context)) + if (!rq->context->gem_context) hw_flags = MI_RESTORE_INHIBIT; ret = mi_set_context(rq, hw_flags); diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c index f63a26a3e620..6b7048636c43 100644 --- a/drivers/gpu/drm/i915/gt/selftest_context.c +++ b/drivers/gpu/drm/i915/gt/selftest_context.c @@ -67,15 +67,14 @@ static int context_sync(struct intel_context *ce) return err; } -static int __live_context_size(struct intel_engine_cs *engine, - struct i915_gem_context *fixme) +static int __live_context_size(struct intel_engine_cs *engine) { struct intel_context *ce; struct i915_request *rq; void *vaddr; int err; - ce = intel_context_create(fixme, engine); + ce = intel_context_create(engine); if (IS_ERR(ce)) return PTR_ERR(ce); @@ -146,7 +145,6 @@ static int live_context_size(void *arg) { struct intel_gt *gt = arg; struct intel_engine_cs *engine; - struct i915_gem_context *fixme; enum intel_engine_id id; int err = 0; @@ -155,10 +153,6 @@ static int live_context_size(void *arg) * HW tries to write past the end of one. 
*/ - fixme = kernel_context(gt->i915); - if (IS_ERR(fixme)) - return PTR_ERR(fixme); - for_each_engine(engine, gt, id) { struct { struct drm_i915_gem_object *state; @@ -183,7 +177,7 @@ static int live_context_size(void *arg) /* Overlaps with the execlists redzone */ engine->context_size += I915_GTT_PAGE_SIZE; - err = __live_context_size(engine, fixme); + err = __live_context_size(engine); engine->context_size -= I915_GTT_PAGE_SIZE; @@ -196,12 +190,10 @@ static int live_context_size(void *arg) break; } - kernel_context_close(fixme); return err; } -static int __live_active_context(struct intel_engine_cs *engine, - struct i915_gem_context *fixme) +static int __live_active_context(struct intel_engine_cs *engine) { struct intel_context *ce; int pass; @@ -226,10 +218,15 @@ static int __live_active_context(struct intel_engine_cs *engine, return -EINVAL; } - ce = intel_context_create(fixme, engine); + ce = intel_context_create(engine); if (IS_ERR(ce)) return PTR_ERR(ce); + /* Pretend to be a user context to partake in idle barriers */ + ce->gem_context = kernel_context(engine->i915); + if (IS_ERR(ce->gem_context)) + goto err_ce; + for (pass = 0; pass <= 2; pass++) { struct i915_request *rq; @@ -282,6 +279,8 @@ static int __live_active_context(struct intel_engine_cs *engine, } err: + kernel_context_close(ce->gem_context); +err_ce: intel_context_put(ce); return err; } @@ -290,7 +289,6 @@ static int live_active_context(void *arg) { struct intel_gt *gt = arg; struct intel_engine_cs *engine; - struct i915_gem_context *fixme; enum intel_engine_id id; struct drm_file *file; int err = 0; @@ -299,14 +297,8 @@ static int live_active_context(void *arg) if (IS_ERR(file)) return PTR_ERR(file); - fixme = live_context(gt->i915, file); - if (IS_ERR(fixme)) { - err = PTR_ERR(fixme); - goto out_file; - } - for_each_engine(engine, gt, id) { - err = __live_active_context(engine, fixme); + err = __live_active_context(engine); if (err) break; @@ -315,7 +307,6 @@ static int live_active_context(void *arg) break; } -out_file: mock_file_free(gt->i915, file); return err; } @@ -348,8 +339,7 @@ static int __remote_sync(struct intel_context *ce, struct intel_context *remote) return err; } -static int __live_remote_context(struct intel_engine_cs *engine, - struct i915_gem_context *fixme) +static int __live_remote_context(struct intel_engine_cs *engine) { struct intel_context *local, *remote; int pass; @@ -363,11 +353,16 @@ static int __live_remote_context(struct intel_engine_cs *engine, * clobber the idle-barrier. 
*/ - remote = intel_context_create(fixme, engine); + remote = intel_context_create(engine); if (IS_ERR(remote)) return PTR_ERR(remote); - local = intel_context_create(fixme, engine); + /* Pretend to be a user context to partake in idle barriers */ + remote->gem_context = kernel_context(engine->i915); + if (IS_ERR(remote->gem_context)) + goto err_remote_ce; + + local = intel_context_create(engine); if (IS_ERR(local)) { err = PTR_ERR(local); goto err_remote; @@ -392,6 +387,8 @@ static int __live_remote_context(struct intel_engine_cs *engine, intel_context_put(local); err_remote: + kernel_context_close(remote->gem_context); +err_remote_ce: intel_context_put(remote); return err; } @@ -400,7 +397,6 @@ static int live_remote_context(void *arg) { struct intel_gt *gt = arg; struct intel_engine_cs *engine; - struct i915_gem_context *fixme; enum intel_engine_id id; struct drm_file *file; int err = 0; @@ -409,14 +405,8 @@ static int live_remote_context(void *arg) if (IS_ERR(file)) return PTR_ERR(file); - fixme = live_context(gt->i915, file); - if (IS_ERR(fixme)) { - err = PTR_ERR(fixme); - goto out_file; - } - for_each_engine(engine, gt, id) { - err = __live_remote_context(engine, fixme); + err = __live_remote_context(engine); if (err) break; @@ -425,7 +415,6 @@ static int live_remote_context(void *arg) break; } -out_file: mock_file_free(gt->i915, file); return err; } diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c index 155c508024df..9e7376b592e5 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c @@ -169,8 +169,7 @@ static int __live_heartbeat_fast(struct intel_engine_cs *engine) int err; int i; - ce = intel_context_create(engine->kernel_context->gem_context, - engine); + ce = intel_context_create(engine); if (IS_ERR(ce)) return PTR_ERR(ce); diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 8abc0a1d692b..4f8cd3cbb8bd 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -377,36 +377,30 @@ static int igt_reset_nop(void *arg) struct intel_gt *gt = arg; struct i915_gpu_error *global = >->i915->gpu_error; struct intel_engine_cs *engine; - struct i915_gem_context *ctx; unsigned int reset_count, count; enum intel_engine_id id; - struct drm_file *file; IGT_TIMEOUT(end_time); int err = 0; /* Check that we can reset during non-user portions of requests */ - file = mock_file(gt->i915); - if (IS_ERR(file)) - return PTR_ERR(file); - - ctx = live_context(gt->i915, file); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto out; - } - - i915_gem_context_clear_bannable(ctx); reset_count = i915_reset_count(global); count = 0; do { for_each_engine(engine, gt, id) { + struct intel_context *ce; int i; + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + break; + } + for (i = 0; i < 16; i++) { struct i915_request *rq; - rq = igt_request_alloc(ctx, engine); + rq = intel_context_create_request(ce); if (IS_ERR(rq)) { err = PTR_ERR(rq); break; @@ -414,6 +408,8 @@ static int igt_reset_nop(void *arg) i915_request_add(rq); } + + intel_context_put(ce); } igt_global_reset_lock(gt); @@ -437,10 +433,7 @@ static int igt_reset_nop(void *arg) } while (time_before(jiffies, end_time)); pr_info("%s: %d resets\n", __func__, count); - err = igt_flush_test(gt->i915); -out: - mock_file_free(gt->i915, file); - if (intel_gt_is_wedged(gt)) + if 
(igt_flush_test(gt->i915)) err = -EIO; return err; } diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 95504c48cd99..04d0dd6a938a 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -53,11 +53,10 @@ static struct i915_vma *create_scratch(struct intel_gt *gt) static int live_sanitycheck(void *arg) { struct intel_gt *gt = arg; - struct i915_gem_engines_iter it; - struct i915_gem_context *ctx; - struct intel_context *ce; + struct intel_engine_cs *engine; + enum intel_engine_id id; struct igt_spinner spin; - int err = -ENOMEM; + int err = 0; if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915)) return 0; @@ -65,17 +64,20 @@ static int live_sanitycheck(void *arg) if (igt_spinner_init(&spin, gt)) return -ENOMEM; - ctx = kernel_context(gt->i915); - if (!ctx) - goto err_spin; - - for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { + for_each_engine(engine, gt, id) { + struct intel_context *ce; struct i915_request *rq; + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + break; + } + rq = igt_spinner_create_request(&spin, ce, MI_NOOP); if (IS_ERR(rq)) { err = PTR_ERR(rq); - goto err_ctx; + goto out_ctx; } i915_request_add(rq); @@ -84,21 +86,21 @@ static int live_sanitycheck(void *arg) GEM_TRACE_DUMP(); intel_gt_set_wedged(gt); err = -EIO; - goto err_ctx; + goto out_ctx; } igt_spinner_end(&spin); if (igt_flush_test(gt->i915)) { err = -EIO; - goto err_ctx; + goto out_ctx; } + +out_ctx: + intel_context_put(ce); + if (err) + break; } - err = 0; -err_ctx: - i915_gem_context_unlock_engines(ctx); - kernel_context_close(ctx); -err_spin: igt_spinner_fini(&spin); return err; } @@ -144,7 +146,7 @@ static int live_unlite_restore(struct intel_gt *gt, int prio) for (n = 0; n < ARRAY_SIZE(ce); n++) { struct intel_context *tmp; - tmp = intel_context_create(ctx, engine); + tmp = intel_context_create(engine); if (IS_ERR(tmp)) { err = PTR_ERR(tmp); goto err_ce; @@ -309,17 +311,17 @@ emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx) static struct i915_request * semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx) { - struct i915_gem_context *ctx; + struct intel_context *ce; struct i915_request *rq; int err; - ctx = kernel_context(engine->i915); - if (!ctx) - return ERR_PTR(-ENOMEM); + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return ERR_CAST(ce); - rq = igt_request_alloc(ctx, engine); + rq = intel_context_create_request(ce); if (IS_ERR(rq)) - goto out_ctx; + goto out_ce; err = 0; if (rq->engine->emit_init_breadcrumb) @@ -332,8 +334,8 @@ semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx) if (err) rq = ERR_PTR(err); -out_ctx: - kernel_context_close(ctx); +out_ce: + intel_context_put(ce); return rq; } @@ -2390,27 +2392,17 @@ static int nop_virtual_engine(struct intel_gt *gt, { IGT_TIMEOUT(end_time); struct i915_request *request[16]; - struct i915_gem_context *ctx[16]; struct intel_context *ve[16]; unsigned long n, prime, nc; struct igt_live_test t; ktime_t times[2] = {}; int err; - GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ctx)); + GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve)); for (n = 0; n < nctx; n++) { - ctx[n] = kernel_context(gt->i915); - if (!ctx[n]) { - err = -ENOMEM; - nctx = n; - goto out; - } - - ve[n] = intel_execlists_create_virtual(ctx[n], - siblings, nsibling); + ve[n] = intel_execlists_create_virtual(siblings, nsibling); if (IS_ERR(ve[n])) { - kernel_context_close(ctx[n]); err = PTR_ERR(ve[n]); 
nctx = n; goto out; @@ -2419,7 +2411,6 @@ static int nop_virtual_engine(struct intel_gt *gt, err = intel_context_pin(ve[n]); if (err) { intel_context_put(ve[n]); - kernel_context_close(ctx[n]); nctx = n; goto out; } @@ -2500,7 +2491,6 @@ static int nop_virtual_engine(struct intel_gt *gt, for (nc = 0; nc < nctx; nc++) { intel_context_unpin(ve[nc]); intel_context_put(ve[nc]); - kernel_context_close(ctx[nc]); } return err; } @@ -2559,7 +2549,6 @@ static int mask_virtual_engine(struct intel_gt *gt, unsigned int nsibling) { struct i915_request *request[MAX_ENGINE_INSTANCE + 1]; - struct i915_gem_context *ctx; struct intel_context *ve; struct igt_live_test t; unsigned int n; @@ -2570,11 +2559,7 @@ static int mask_virtual_engine(struct intel_gt *gt, * restrict it to our desired engine within the virtual engine. */ - ctx = kernel_context(gt->i915); - if (!ctx) - return -ENOMEM; - - ve = intel_execlists_create_virtual(ctx, siblings, nsibling); + ve = intel_execlists_create_virtual(siblings, nsibling); if (IS_ERR(ve)) { err = PTR_ERR(ve); goto out_close; @@ -2642,7 +2627,6 @@ static int mask_virtual_engine(struct intel_gt *gt, out_put: intel_context_put(ve); out_close: - kernel_context_close(ctx); return err; } @@ -2682,7 +2666,6 @@ static int preserved_virtual_engine(struct intel_gt *gt, unsigned int nsibling) { struct i915_request *last = NULL; - struct i915_gem_context *ctx; struct intel_context *ve; struct i915_vma *scratch; struct igt_live_test t; @@ -2690,17 +2673,11 @@ static int preserved_virtual_engine(struct intel_gt *gt, int err = 0; u32 *cs; - ctx = kernel_context(gt->i915); - if (!ctx) - return -ENOMEM; - scratch = create_scratch(siblings[0]->gt); - if (IS_ERR(scratch)) { - err = PTR_ERR(scratch); - goto out_close; - } + if (IS_ERR(scratch)) + return PTR_ERR(scratch); - ve = intel_execlists_create_virtual(ctx, siblings, nsibling); + ve = intel_execlists_create_virtual(siblings, nsibling); if (IS_ERR(ve)) { err = PTR_ERR(ve); goto out_scratch; @@ -2783,8 +2760,6 @@ static int preserved_virtual_engine(struct intel_gt *gt, intel_context_put(ve); out_scratch: i915_vma_unpin_and_release(&scratch, 0); -out_close: - kernel_context_close(ctx); return err; } @@ -2878,9 +2853,7 @@ static int bond_virtual_engine(struct intel_gt *gt, for (n = 0; n < nsibling; n++) { struct intel_context *ve; - ve = intel_execlists_create_virtual(ctx, - siblings, - nsibling); + ve = intel_execlists_create_virtual(siblings, nsibling); if (IS_ERR(ve)) { err = PTR_ERR(ve); onstack_fence_fini(&fence); @@ -3268,8 +3241,7 @@ static int live_lrc_fixed(void *arg) return err; } -static int __live_lrc_state(struct i915_gem_context *fixme, - struct intel_engine_cs *engine, +static int __live_lrc_state(struct intel_engine_cs *engine, struct i915_vma *scratch) { struct intel_context *ce; @@ -3284,7 +3256,7 @@ static int __live_lrc_state(struct i915_gem_context *fixme, int err; int n; - ce = intel_context_create(fixme, engine); + ce = intel_context_create(engine); if (IS_ERR(ce)) return PTR_ERR(ce); @@ -3358,7 +3330,6 @@ static int live_lrc_state(void *arg) { struct intel_gt *gt = arg; struct intel_engine_cs *engine; - struct i915_gem_context *fixme; struct i915_vma *scratch; enum intel_engine_id id; int err = 0; @@ -3368,18 +3339,12 @@ static int live_lrc_state(void *arg) * intel_context. 
*/ - fixme = kernel_context(gt->i915); - if (!fixme) - return -ENOMEM; - scratch = create_scratch(gt); - if (IS_ERR(scratch)) { - err = PTR_ERR(scratch); - goto out_close; - } + if (IS_ERR(scratch)) + return PTR_ERR(scratch); for_each_engine(engine, gt, id) { - err = __live_lrc_state(fixme, engine, scratch); + err = __live_lrc_state(engine, scratch); if (err) break; } @@ -3388,8 +3353,6 @@ static int live_lrc_state(void *arg) err = -EIO; i915_vma_unpin_and_release(&scratch, 0); -out_close: - kernel_context_close(fixme); return err; } @@ -3422,8 +3385,7 @@ static int gpr_make_dirty(struct intel_engine_cs *engine) return 0; } -static int __live_gpr_clear(struct i915_gem_context *fixme, - struct intel_engine_cs *engine, +static int __live_gpr_clear(struct intel_engine_cs *engine, struct i915_vma *scratch) { struct intel_context *ce; @@ -3439,7 +3401,7 @@ static int __live_gpr_clear(struct i915_gem_context *fixme, if (err) return err; - ce = intel_context_create(fixme, engine); + ce = intel_context_create(engine); if (IS_ERR(ce)) return PTR_ERR(ce); @@ -3501,7 +3463,6 @@ static int live_gpr_clear(void *arg) { struct intel_gt *gt = arg; struct intel_engine_cs *engine; - struct i915_gem_context *fixme; struct i915_vma *scratch; enum intel_engine_id id; int err = 0; @@ -3511,18 +3472,12 @@ static int live_gpr_clear(void *arg) * to avoid leaking any information from previous contexts. */ - fixme = kernel_context(gt->i915); - if (!fixme) - return -ENOMEM; - scratch = create_scratch(gt); - if (IS_ERR(scratch)) { - err = PTR_ERR(scratch); - goto out_close; - } + if (IS_ERR(scratch)) + return PTR_ERR(scratch); for_each_engine(engine, gt, id) { - err = __live_gpr_clear(fixme, engine, scratch); + err = __live_gpr_clear(engine, scratch); if (err) break; } @@ -3531,8 +3486,6 @@ static int live_gpr_clear(void *arg) err = -EIO; i915_vma_unpin_and_release(&scratch, 0); -out_close: - kernel_context_close(fixme); return err; } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 976d6950218e..f0785b9fe02c 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -35,12 +35,12 @@ #include <linux/kthread.h> -#include "gem/i915_gem_context.h" #include "gem/i915_gem_pm.h" #include "gt/intel_context.h" #include "gt/intel_ring.h" #include "i915_drv.h" +#include "i915_gem_gtt.h" #include "gvt.h" #define RING_CTX_OFF(x) \ @@ -1220,16 +1220,14 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) struct drm_i915_private *i915 = vgpu->gvt->dev_priv; struct intel_vgpu_submission *s = &vgpu->submission; struct intel_engine_cs *engine; - struct i915_gem_context *ctx; struct i915_ppgtt *ppgtt; enum intel_engine_id i; int ret; - ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MAX); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); + ppgtt = i915_ppgtt_create(i915); + if (IS_ERR(ppgtt)) + return PTR_ERR(ppgtt); - ppgtt = i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx)); i915_context_ppgtt_root_save(s, ppgtt); for_each_engine(engine, i915, i) { @@ -1238,12 +1236,14 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) INIT_LIST_HEAD(&s->workload_q_head[i]); s->shadow[i] = ERR_PTR(-EINVAL); - ce = intel_context_create(ctx, engine); + ce = intel_context_create(engine); if (IS_ERR(ce)) { ret = PTR_ERR(ce); goto out_shadow_ctx; } + i915_vm_put(ce->vm); + ce->vm = i915_vm_get(&ppgtt->vm); intel_context_set_single_submission(ce); if (!USES_GUC_SUBMISSION(i915)) { /* Max ring buffer size */ @@ -1278,7 +1278,6 @@ int 
intel_vgpu_setup_submission(struct intel_vgpu *vgpu) bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES); i915_vm_put(&ppgtt->vm); - i915_gem_context_put(ctx); return 0; out_shadow_ctx: @@ -1291,7 +1290,6 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) intel_context_put(s->shadow[i]); } i915_vm_put(&ppgtt->vm); - i915_gem_context_put(ctx); return ret; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b5de8f928320..e308a9a71ab1 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -956,9 +956,6 @@ struct drm_i915_private { struct pci_dev *bridge_dev; - /* Context used internally to idle the GPU and setup initial state */ - struct i915_gem_context *kernel_context; - struct intel_engine_cs *engine[I915_NUM_ENGINES]; struct rb_root uabi_engines; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index fe348ff7db4f..1063d4c74a7f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1063,8 +1063,7 @@ static int __intel_engines_record_defaults(struct intel_gt *gt) GEM_BUG_ON(!engine->kernel_context); engine->serial++; /* force the kernel context switch */ - ce = intel_context_create(engine->kernel_context->gem_context, - engine); + ce = intel_context_create(engine); if (IS_ERR(ce)) { err = PTR_ERR(ce); goto out; @@ -1219,6 +1218,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) } intel_gt_init(&dev_priv->gt); + i915_gem_init_contexts(dev_priv); ret = intel_engines_setup(&dev_priv->gt); if (ret) { @@ -1226,16 +1226,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv) goto err_unlock; } - ret = i915_gem_init_contexts(dev_priv); - if (ret) { - GEM_BUG_ON(ret == -EIO); - goto err_scratch; - } - ret = intel_engines_init(&dev_priv->gt); if (ret) { GEM_BUG_ON(ret == -EIO); - goto err_context; + goto err_scratch; } intel_uc_init(&dev_priv->gt.uc); @@ -1299,9 +1293,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv) intel_uc_fini(&dev_priv->gt.uc); intel_engines_cleanup(&dev_priv->gt); } -err_context: - if (ret != -EIO) - i915_gem_driver_release__contexts(dev_priv); err_scratch: intel_gt_driver_release(&dev_priv->gt); err_unlock: @@ -1367,7 +1358,6 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv) void i915_gem_driver_release(struct drm_i915_private *dev_priv) { intel_engines_cleanup(&dev_priv->gt); - i915_gem_driver_release__contexts(dev_priv); intel_gt_driver_release(&dev_priv->gt); intel_wa_list_free(&dev_priv->gt_wa_list); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 88179202c556..5e207412a459 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1363,12 +1363,8 @@ static int gen8_init_scratch(struct i915_address_space *vm) * If everybody agrees to not to write into the scratch page, * we can reuse it for all vm, keeping contexts and processes separate. 
*/ - if (vm->has_read_only && - vm->i915->kernel_context && - vm->i915->kernel_context->vm) { - struct i915_address_space *clone = - rcu_dereference_protected(vm->i915->kernel_context->vm, - true); /* static */ + if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) { + struct i915_address_space *clone = vm->gt->vm; GEM_BUG_ON(!clone->has_read_only); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index b40157cfae14..598425a2be62 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1231,9 +1231,11 @@ static void record_request(const struct i915_request *request, erq->head = request->head; erq->tail = request->tail; - rcu_read_lock(); - erq->pid = ctx && ctx->pid ? pid_nr(ctx->pid) : 0; - rcu_read_unlock(); + if (ctx) { + rcu_read_lock(); + erq->pid = ctx && ctx->pid ? pid_nr(ctx->pid) : 0; + rcu_read_unlock(); + } } static void engine_record_requests(struct intel_engine_cs *engine, diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 6e35b9255882..f1147fbb1dc0 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -2340,9 +2340,6 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream, */ spin_lock(&i915->gem.contexts.lock); list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) { - if (ctx == i915->kernel_context) - continue; - if (!kref_get_unless_zero(&ctx->ref)) continue; diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index b9bccdb9ee6c..3b9c76dd08f0 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1227,8 +1227,8 @@ void __i915_request_queue(struct i915_request *rq, void i915_request_add(struct i915_request *rq) { - struct i915_sched_attr attr = rq->context->gem_context->sched; struct intel_timeline * const tl = i915_request_timeline(rq); + struct i915_sched_attr attr = {}; struct i915_request *prev; lockdep_assert_held(&tl->mutex); @@ -1238,6 +1238,9 @@ void i915_request_add(struct i915_request *rq) prev = __i915_request_commit(rq); + if (rq->context->gem_context) + attr = rq->context->gem_context->sched; + /* * Boost actual workloads past semaphores! 
* diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 30ae34f62176..9ddade4eacf8 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -739,10 +739,8 @@ static int live_empty_request(void *arg) static struct i915_vma *recursive_batch(struct drm_i915_private *i915) { - struct i915_gem_context *ctx = i915->kernel_context; struct drm_i915_gem_object *obj; const int gen = INTEL_GEN(i915); - struct i915_address_space *vm; struct i915_vma *vma; u32 *cmd; int err; @@ -751,9 +749,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915) if (IS_ERR(obj)) return ERR_CAST(obj); - vm = i915_gem_context_get_vm_rcu(ctx); - vma = i915_vma_instance(obj, vm, NULL); - i915_vm_put(vm); + vma = i915_vma_instance(obj, i915->gt.vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err; diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 27ed3cee6a9b..98a754faf222 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -63,7 +63,6 @@ static void mock_device_release(struct drm_device *dev) for_each_engine(engine, &i915->gt, id) mock_engine_free(engine); - i915_gem_driver_release__contexts(i915); intel_timelines_fini(i915); @@ -183,6 +182,7 @@ struct drm_i915_private *mock_gem_device(void) intel_timelines_init(i915); mock_init_ggtt(i915, &i915->ggtt); + i915->gt.vm = i915_vm_get(&i915->ggtt.vm); mkwrite_device_info(i915)->engine_mask = BIT(0); @@ -190,10 +190,6 @@ struct drm_i915_private *mock_gem_device(void) if (!i915->engine[RCS0]) goto err_unlock; - i915->kernel_context = mock_context(i915, NULL); - if (!i915->kernel_context) - goto err_engine; - if (mock_engine_init(i915->engine[RCS0])) goto err_context; @@ -202,8 +198,6 @@ struct drm_i915_private *mock_gem_device(void) return i915; err_context: - i915_gem_driver_release__contexts(i915); -err_engine: mock_engine_free(i915->engine[RCS0]); err_unlock: intel_timelines_fini(i915); -- 2.24.0.rc1 _______________________________________________ Intel-gfx mailing list Intel-gfx@xxxxxxxxxxxxxxxxxxxxx https://lists.freedesktop.org/mailman/listinfo/intel-gfx