We no longer need struct_mutex to serialise request emission, so remove
it from the gt selftests.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 .../gpu/drm/i915/gem/selftests/huge_pages.c   |  15 +-
 .../drm/i915/gem/selftests/i915_gem_mman.c    |   2 -
 .../drm/i915/gem/selftests/i915_gem_phys.c    |   2 -
 drivers/gpu/drm/i915/gt/selftest_lrc.c        | 148 +++---------------
 .../gpu/drm/i915/gt/selftest_workarounds.c    |  11 +-
 drivers/gpu/drm/i915/gt/uc/selftest_guc.c     |   4 -
 6 files changed, 27 insertions(+), 155 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index e204e653b459..75e717f7bb5a 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -1633,7 +1633,6 @@ int i915_gem_huge_page_mock_selftests(void)
 	mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
 	mkwrite_device_info(dev_priv)->ppgtt_size = 48;
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
 	ppgtt = i915_ppgtt_create(dev_priv);
 	if (IS_ERR(ppgtt)) {
 		err = PTR_ERR(ppgtt);
@@ -1659,9 +1658,7 @@ int i915_gem_huge_page_mock_selftests(void)
 	i915_vm_put(&ppgtt->vm);
 
 out_unlock:
-	mutex_unlock(&dev_priv->drm.struct_mutex);
 	drm_dev_put(&dev_priv->drm);
-
 	return err;
 }
 
@@ -1677,7 +1674,6 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
 	};
 	struct drm_file *file;
 	struct i915_gem_context *ctx;
-	intel_wakeref_t wakeref;
 	int err;
 
 	if (!HAS_PPGTT(i915)) {
@@ -1692,13 +1688,10 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
 	ctx = live_context(i915, file);
 	if (IS_ERR(ctx)) {
 		err = PTR_ERR(ctx);
-		goto out_unlock;
+		goto out_file;
 	}
 
 	if (rcu_access_pointer(ctx->vm))
@@ -1706,11 +1699,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
 
 	err = i915_subtests(tests, ctx);
 
-out_unlock:
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-	mutex_unlock(&i915->drm.struct_mutex);
-
+out_file:
 	mock_file_free(i915, file);
-
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 1cd25cfd0246..cfa52c525691 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -669,9 +669,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
 			goto out;
 		}
 
-		mutex_lock(&i915->drm.struct_mutex);
 		err = make_obj_busy(obj);
-		mutex_unlock(&i915->drm.struct_mutex);
 		if (err) {
 			pr_err("[loop %d] Failed to busy the object\n", loop);
 			goto err_obj;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
index 94a15e3f6db8..34932871b3a5 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
@@ -25,9 +25,7 @@ static int mock_phys_object(void *arg)
 		goto out;
 	}
 
-	mutex_lock(&i915->drm.struct_mutex);
 	err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
-	mutex_unlock(&i915->drm.struct_mutex);
 	if (err) {
 		pr_err("i915_gem_object_attach_phys failed, err=%d\n", err);
 		goto out_obj;
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index e33c2f66c683..89affc958d01 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -26,17 +26,13 @@ static int live_sanitycheck(void *arg)
 	struct i915_gem_context *ctx;
 	struct intel_context *ce;
 	struct igt_spinner spin;
-	intel_wakeref_t wakeref;
 	int err = -ENOMEM;
 
 	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
 		return 0;
 
-	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
 	if (igt_spinner_init(&spin, &i915->gt))
-		goto err_unlock;
+		return -ENOMEM;
 
 	ctx = kernel_context(i915);
 	if (!ctx)
@@ -73,9 +69,6 @@ static int live_sanitycheck(void *arg)
 	kernel_context_close(ctx);
 err_spin:
 	igt_spinner_fini(&spin);
-err_unlock:
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }
 
@@ -224,7 +217,6 @@ static int live_timeslice_preempt(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct drm_i915_gem_object *obj;
-	intel_wakeref_t wakeref;
 	struct i915_vma *vma;
 	void *vaddr;
 	int err = 0;
@@ -239,14 +231,9 @@ static int live_timeslice_preempt(void *arg)
 	 * ready task.
 	 */
 
-	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
 	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
-	if (IS_ERR(obj)) {
-		err = PTR_ERR(obj);
-		goto err_unlock;
-	}
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
 
 	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
 	if (IS_ERR(vma)) {
@@ -291,10 +278,6 @@ static int live_timeslice_preempt(void *arg)
 	i915_gem_object_unpin_map(obj);
 err_obj:
 	i915_gem_object_put(obj);
-err_unlock:
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-	mutex_unlock(&i915->drm.struct_mutex);
-
 	return err;
 }
 
@@ -306,7 +289,6 @@ static int live_busywait_preempt(void *arg)
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
 	enum intel_engine_id id;
-	intel_wakeref_t wakeref;
 	int err = -ENOMEM;
 	u32 *map;
 
@@ -315,12 +297,9 @@ static int live_busywait_preempt(void *arg)
 	 * preempt the busywaits used to synchronise between rings.
 	 */
 
-	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
 	ctx_hi = kernel_context(i915);
 	if (!ctx_hi)
-		goto err_unlock;
+		return -ENOMEM;
 	ctx_hi->sched.priority =
 		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
 
@@ -474,9 +453,6 @@ static int live_busywait_preempt(void *arg)
 	kernel_context_close(ctx_lo);
 err_ctx_hi:
 	kernel_context_close(ctx_hi);
-err_unlock:
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }
 
@@ -505,7 +481,6 @@ static int live_preempt(void *arg)
 	struct igt_spinner spin_hi, spin_lo;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
-	intel_wakeref_t wakeref;
 	int err = -ENOMEM;
 
 	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
@@ -514,11 +489,8 @@ static int live_preempt(void *arg)
 	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
 		pr_err("Logical preemption supported, but not exposed\n");
 
-	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
 	if (igt_spinner_init(&spin_hi, &i915->gt))
-		goto err_unlock;
+		return -ENOMEM;
 
 	if (igt_spinner_init(&spin_lo, &i915->gt))
 		goto err_spin_hi;
@@ -598,9 +570,6 @@ static int live_preempt(void *arg)
 	igt_spinner_fini(&spin_lo);
 err_spin_hi:
 	igt_spinner_fini(&spin_hi);
-err_unlock:
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }
 
@@ -612,17 +581,13 @@ static int live_late_preempt(void *arg)
 	struct intel_engine_cs *engine;
 	struct i915_sched_attr attr = {};
 	enum intel_engine_id id;
-	intel_wakeref_t wakeref;
 	int err = -ENOMEM;
 
 	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
 		return 0;
 
-	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
 	if (igt_spinner_init(&spin_hi, &i915->gt))
-		goto err_unlock;
+		return -ENOMEM;
 
 	if (igt_spinner_init(&spin_lo, &i915->gt))
 		goto err_spin_hi;
@@ -704,9 +669,6 @@ static int live_late_preempt(void *arg)
 	igt_spinner_fini(&spin_lo);
 err_spin_hi:
 	igt_spinner_fini(&spin_hi);
-err_unlock:
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 
 err_wedged:
@@ -751,7 +713,6 @@ static int live_nopreempt(void *arg)
 	struct intel_engine_cs *engine;
 	struct preempt_client a, b;
 	enum intel_engine_id id;
-	intel_wakeref_t wakeref;
 	int err = -ENOMEM;
 
 	/*
@@ -762,11 +723,8 @@ static int live_nopreempt(void *arg)
 	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
 		return 0;
 
-	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
 	if (preempt_client_init(i915, &a))
-		goto err_unlock;
+		return -ENOMEM;
 	if (preempt_client_init(i915, &b))
 		goto err_client_a;
 	b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
@@ -840,9 +798,6 @@ static int live_nopreempt(void *arg)
 	preempt_client_fini(&b);
 err_client_a:
 	preempt_client_fini(&a);
-err_unlock:
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 
 err_wedged:
@@ -862,7 +817,6 @@ static int live_suppress_self_preempt(void *arg)
 	};
 	struct preempt_client a, b;
 	enum intel_engine_id id;
-	intel_wakeref_t wakeref;
 	int err = -ENOMEM;
 
 	/*
@@ -881,11 +835,8 @@ static int live_suppress_self_preempt(void *arg)
 	if (intel_vgpu_active(i915))
 		return 0; /* GVT forces single port & request submission */
 
-	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
 	if (preempt_client_init(i915, &a))
-		goto err_unlock;
+		return -ENOMEM;
 	if (preempt_client_init(i915, &b))
 		goto err_client_a;
 
@@ -966,9 +917,6 @@ static int live_suppress_self_preempt(void *arg)
 	preempt_client_fini(&b);
 err_client_a:
 	preempt_client_fini(&a);
-err_unlock:
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 
 err_wedged:
@@ -1038,7 +986,6 @@ static int live_suppress_wait_preempt(void *arg)
 	struct preempt_client client[4];
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
-	intel_wakeref_t wakeref;
 	int err = -ENOMEM;
 	int i;
 
@@ -1051,11 +998,8 @@ static int live_suppress_wait_preempt(void *arg)
 	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
 		return 0;
 
-	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
 	if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
-		goto err_unlock;
+		return -ENOMEM;
 	if (preempt_client_init(i915, &client[1])) /* ELSP[1] */
 		goto err_client_0;
 	if (preempt_client_init(i915, &client[2])) /* head of queue */
 		goto err_client_1;
@@ -1141,9 +1085,6 @@ static int live_suppress_wait_preempt(void *arg)
 	preempt_client_fini(&client[1]);
 err_client_0:
 	preempt_client_fini(&client[0]);
-err_unlock:
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 
 err_wedged:
@@ -1160,7 +1101,6 @@ static int live_chain_preempt(void *arg)
 	struct intel_engine_cs *engine;
 	struct preempt_client hi, lo;
 	enum intel_engine_id id;
-	intel_wakeref_t wakeref;
 	int err = -ENOMEM;
 
 	/*
@@ -1172,11 +1112,8 @@ static int live_chain_preempt(void *arg)
 	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
 		return 0;
 
-	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
 	if (preempt_client_init(i915, &hi))
-		goto err_unlock;
+		return -ENOMEM;
 	if (preempt_client_init(i915, &lo))
 		goto err_client_hi;
 
@@ -1287,9 +1224,6 @@ static int live_chain_preempt(void *arg)
 	preempt_client_fini(&lo);
 err_client_hi:
 	preempt_client_fini(&hi);
-err_unlock:
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 
 err_wedged:
@@ -1307,7 +1241,6 @@ static int live_preempt_hang(void *arg)
 	struct igt_spinner spin_hi, spin_lo;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
-	intel_wakeref_t wakeref;
 	int err = -ENOMEM;
 
 	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
@@ -1316,11 +1249,8 @@ static int live_preempt_hang(void *arg)
 	if (!intel_has_reset_engine(i915))
 		return 0;
 
-	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
 	if (igt_spinner_init(&spin_hi, &i915->gt))
-		goto err_unlock;
+		return -ENOMEM;
 
 	if (igt_spinner_init(&spin_lo, &i915->gt))
 		goto err_spin_hi;
@@ -1412,9 +1342,6 @@ static int live_preempt_hang(void *arg)
 	igt_spinner_fini(&spin_lo);
 err_spin_hi:
 	igt_spinner_fini(&spin_hi);
-err_unlock:
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }
 
@@ -1504,11 +1431,9 @@ static int smoke_crescendo_thread(void *arg)
 		struct i915_gem_context *ctx = smoke_context(smoke);
 		int err;
 
-		mutex_lock(&smoke->i915->drm.struct_mutex);
 		err = smoke_submit(smoke,
 				   ctx, count % I915_PRIORITY_MAX,
 				   smoke->batch);
-		mutex_unlock(&smoke->i915->drm.struct_mutex);
 		if (err)
 			return err;
 
@@ -1529,8 +1454,6 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
 	unsigned long count;
 	int err = 0;
 
-	mutex_unlock(&smoke->i915->drm.struct_mutex);
-
 	for_each_engine(engine, smoke->i915, id) {
 		arg[id] = *smoke;
 		arg[id].engine = engine;
@@ -1563,8 +1486,6 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
 		put_task_struct(tsk[id]);
 	}
 
-	mutex_lock(&smoke->i915->drm.struct_mutex);
-
 	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
 		count, flags,
 		RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
@@ -1607,7 +1528,6 @@ static int live_preempt_smoke(void *arg)
 		.ncontext = 1024,
 	};
 	const unsigned int phase[] = { 0, BATCH };
-	intel_wakeref_t wakeref;
 	struct igt_live_test t;
 	int err = -ENOMEM;
 	u32 *cs;
@@ -1622,13 +1542,10 @@ static int live_preempt_smoke(void *arg)
 	if (!smoke.contexts)
 		return -ENOMEM;
 
-	mutex_lock(&smoke.i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(&smoke.i915->runtime_pm);
-
 	smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
 	if (IS_ERR(smoke.batch)) {
 		err = PTR_ERR(smoke.batch);
-		goto err_unlock;
+		goto err_free;
 	}
 
 	cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
@@ -1675,9 +1592,7 @@ static int live_preempt_smoke(void *arg)
 
 err_batch:
 	i915_gem_object_put(smoke.batch);
-err_unlock:
-	intel_runtime_pm_put(&smoke.i915->runtime_pm, wakeref);
-	mutex_unlock(&smoke.i915->drm.struct_mutex);
+err_free:
 	kfree(smoke.contexts);
 
 	return err;
@@ -1815,19 +1730,17 @@ static int live_virtual_engine(void *arg)
 	struct intel_gt *gt = &i915->gt;
 	enum intel_engine_id id;
 	unsigned int class, inst;
-	int err = -ENODEV;
+	int err;
 
 	if (USES_GUC_SUBMISSION(i915))
 		return 0;
 
-	mutex_lock(&i915->drm.struct_mutex);
-
 	for_each_engine(engine, i915, id) {
 		err = nop_virtual_engine(i915, &engine, 1, 1, 0);
 		if (err) {
 			pr_err("Failed to wrap engine %s: err=%d\n",
 			       engine->name, err);
-			goto out_unlock;
+			return err;
 		}
 	}
 
@@ -1848,17 +1761,15 @@ static int live_virtual_engine(void *arg)
 			err = nop_virtual_engine(i915, siblings, nsibling,
 						 n, 0);
 			if (err)
-				goto out_unlock;
+				return err;
 		}
 
 		err = nop_virtual_engine(i915, siblings, nsibling, n, CHAIN);
 		if (err)
-			goto out_unlock;
+			return err;
 	}
 
-out_unlock:
-	mutex_unlock(&i915->drm.struct_mutex);
-	return err;
+	return 0;
 }
 
 static int mask_virtual_engine(struct drm_i915_private *i915,
@@ -1937,9 +1848,6 @@ static int mask_virtual_engine(struct drm_i915_private *i915,
 	}
 
 	err = igt_live_test_end(&t);
-	if (err)
-		goto out;
-
 out:
 	if (igt_flush_test(i915))
 		err = -EIO;
@@ -1962,13 +1870,11 @@ static int live_virtual_mask(void *arg)
 	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
 	struct intel_gt *gt = &i915->gt;
 	unsigned int class, inst;
-	int err = 0;
+	int err;
 
 	if (USES_GUC_SUBMISSION(i915))
 		return 0;
 
-	mutex_lock(&i915->drm.struct_mutex);
-
 	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
 		unsigned int nsibling;
 
@@ -1984,12 +1890,10 @@ static int live_virtual_mask(void *arg)
 
 		err = mask_virtual_engine(i915, siblings, nsibling);
 		if (err)
-			goto out_unlock;
+			return err;
 	}
 
-out_unlock:
-	mutex_unlock(&i915->drm.struct_mutex);
-	return err;
+	return 0;
 }
 
 static int bond_virtual_engine(struct drm_i915_private *i915,
@@ -2140,13 +2044,11 @@ static int live_virtual_bond(void *arg)
 	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
 	struct intel_gt *gt = &i915->gt;
 	unsigned int class, inst;
-	int err = 0;
+	int err;
 
 	if (USES_GUC_SUBMISSION(i915))
 		return 0;
 
-	mutex_lock(&i915->drm.struct_mutex);
-
 	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
 		const struct phase *p;
 		int nsibling;
@@ -2169,14 +2071,12 @@ static int live_virtual_bond(void *arg)
 			if (err) {
 				pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n",
 				       __func__, p->name, class, nsibling, err);
-				goto out_unlock;
+				return err;
 			}
 		}
 	}
 
-out_unlock:
-	mutex_unlock(&i915->drm.struct_mutex);
-	return err;
+	return 0;
 }
 
 int intel_execlists_live_selftests(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index ed44333d9e20..a10990b1a72c 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -704,9 +704,7 @@ static int live_dirty_whitelist(void *arg)
 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 
-	mutex_unlock(&i915->drm.struct_mutex);
 	file = mock_file(i915);
-	mutex_lock(&i915->drm.struct_mutex);
 	if (IS_ERR(file)) {
 		err = PTR_ERR(file);
 		goto out_rpm;
 	}
@@ -728,9 +726,7 @@ static int live_dirty_whitelist(void *arg)
 	}
 
 out_file:
-	mutex_unlock(&i915->drm.struct_mutex);
 	mock_file_free(i915, file);
-	mutex_lock(&i915->drm.struct_mutex);
 out_rpm:
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	return err;
@@ -1264,14 +1260,9 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_gpu_reset_workarounds),
 		SUBTEST(live_engine_reset_workarounds),
 	};
-	int err;
 
 	if (intel_gt_is_wedged(&i915->gt))
 		return 0;
 
-	mutex_lock(&i915->drm.struct_mutex);
-	err = i915_subtests(tests, i915);
-	mutex_unlock(&i915->drm.struct_mutex);
-
-	return err;
+	return i915_subtests(tests, i915);
 }
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index bba0eafe1cdb..f927f851aadf 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -116,7 +116,6 @@ static int igt_guc_clients(void *args)
 	int err = 0;
 
 	GEM_BUG_ON(!HAS_GT_UC(dev_priv));
-	mutex_lock(&dev_priv->drm.struct_mutex);
 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 
 	guc = &dev_priv->gt.uc.guc;
@@ -190,7 +189,6 @@ static int igt_guc_clients(void *args)
 	guc_clients_enable(guc);
 unlock:
 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
 	return err;
 }
 
@@ -208,7 +206,6 @@ static int igt_guc_doorbells(void *arg)
 	u16 db_id;
 
 	GEM_BUG_ON(!HAS_GT_UC(dev_priv));
-	mutex_lock(&dev_priv->drm.struct_mutex);
 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 
 	guc = &dev_priv->gt.uc.guc;
@@ -299,7 +296,6 @@ static int igt_guc_doorbells(void *arg)
 	}
 unlock:
 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
 	return err;
 }
-- 
2.23.0
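Reviewer's note: the change has the same shape in every selftest touched above. The struct_mutex/wakeref prologue and the matching err_unlock epilogue disappear, and early failures simply return. The sketch below condenses that before/after pattern from the live_preempt()/live_sanitycheck() hunks. The two functions and their placement are hypothetical (they are not part of this patch), but the helpers and fields they use, struct_mutex, intel_runtime_pm_get()/intel_runtime_pm_put(), igt_spinner_init()/igt_spinner_fini(), are the real ones that appear in the diff; it assumes the usual selftest includes already present in selftest_lrc.c (i915_selftest.h, selftests/igt_spinner.h).

/*
 * Illustrative sketch only -- not part of the patch above.
 * Before: every selftest took struct_mutex and an explicit rpm wakeref
 * around request emission and had to unwind both on any error.
 */
static int live_example_before(struct drm_i915_private *i915)
{
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	mutex_lock(&i915->drm.struct_mutex);	/* serialised request emission */
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	if (igt_spinner_init(&spin, &i915->gt))
		goto err_unlock;

	/* ... build and submit requests ... */
	err = 0;

	igt_spinner_fini(&spin);
err_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

/*
 * After: request emission no longer needs struct_mutex, and the explicit
 * wakeref can go with it (presumably because emission now manages its own
 * GT wakeref -- that rationale is implied by the diff rather than stated),
 * so early failures return directly and the unwind labels collapse.
 */
static int live_example_after(struct drm_i915_private *i915)
{
	struct igt_spinner spin;
	int err;

	if (igt_spinner_init(&spin, &i915->gt))
		return -ENOMEM;

	/* ... build and submit requests ... */
	err = 0;

	igt_spinner_fini(&spin);
	return err;
}

The same simplification is what lets the out_unlock labels in the virtual-engine tests collapse into plain return err / return 0 paths, and lets intel_workarounds_live_selftests() shrink to a direct return i915_subtests(tests, i915).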