The functions were already using only the structure internally, so we just need to flip the interface. v2: rebase Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@xxxxxxxxx> --- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 11 +++-- drivers/gpu/drm/i915/gem/i915_gem_object.c | 4 +- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 4 +- .../gpu/drm/i915/gem/selftests/huge_pages.c | 4 +- .../i915/gem/selftests/i915_gem_coherency.c | 4 +- .../drm/i915/gem/selftests/i915_gem_context.c | 12 ++--- .../drm/i915/gem/selftests/i915_gem_mman.c | 4 +- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 8 +-- drivers/gpu/drm/i915/gt/intel_hangcheck.c | 4 +- drivers/gpu/drm/i915/gt/intel_reset.c | 4 +- drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 12 ++--- drivers/gpu/drm/i915/gt/selftest_lrc.c | 36 +++++++------- drivers/gpu/drm/i915/gt/selftest_reset.c | 4 +- .../gpu/drm/i915/gt/selftest_workarounds.c | 12 ++--- drivers/gpu/drm/i915/gvt/aperture_gm.c | 15 +++--- drivers/gpu/drm/i915/gvt/gvt.h | 4 +- drivers/gpu/drm/i915/gvt/sched_policy.c | 4 +- drivers/gpu/drm/i915/gvt/scheduler.c | 4 +- drivers/gpu/drm/i915/i915_debugfs.c | 49 ++++++++++--------- drivers/gpu/drm/i915/i915_gem.c | 15 +++--- drivers/gpu/drm/i915/i915_gem_fence_reg.c | 4 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 6 +-- drivers/gpu/drm/i915/i915_perf.c | 6 +-- drivers/gpu/drm/i915/i915_pmu.c | 12 ++--- drivers/gpu/drm/i915/i915_sysfs.c | 12 ++--- drivers/gpu/drm/i915/intel_display.c | 12 ++--- drivers/gpu/drm/i915/intel_display_power.c | 37 +++++++------- drivers/gpu/drm/i915/intel_fbdev.c | 6 +-- drivers/gpu/drm/i915/intel_hotplug.c | 4 +- drivers/gpu/drm/i915/intel_runtime_pm.c | 43 +++++++--------- drivers/gpu/drm/i915/intel_runtime_pm.h | 26 +++++----- drivers/gpu/drm/i915/intel_wakeref.c | 8 +-- drivers/gpu/drm/i915/selftests/i915_active.c | 8 +-- drivers/gpu/drm/i915/selftests/i915_gem.c | 4 +- .../gpu/drm/i915/selftests/i915_gem_evict.c | 4 +- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 8 +-- drivers/gpu/drm/i915/selftests/i915_request.c | 20 ++++---- .../gpu/drm/i915/selftests/i915_timeline.c | 16 +++--- drivers/gpu/drm/i915/selftests/i915_vma.c | 4 +- drivers/gpu/drm/i915/selftests/intel_guc.c | 8 +-- drivers/gpu/drm/i915/selftests/intel_uncore.c | 4 +- 41 files changed, 234 insertions(+), 232 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index caa61f09f714..8df7900e3435 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -222,6 +222,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data); struct drm_device *dev = obj->base.dev; struct drm_i915_private *i915 = to_i915(dev); + struct intel_runtime_pm *rpm = &i915->runtime_pm; struct i915_ggtt *ggtt = &i915->ggtt; bool write = area->vm_flags & VM_WRITE; intel_wakeref_t wakeref; @@ -243,7 +244,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) if (ret) goto err; - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(rpm); srcu = i915_reset_trylock(i915); if (srcu < 0) { @@ -308,7 +309,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) goto err_fence; /* Mark as being mmapped into userspace for later revocation */ - assert_rpm_wakelock_held(&i915->runtime_pm); + assert_rpm_wakelock_held(rpm); if (!i915_vma_set_userfault(vma) && !obj->userfault_count++) list_add(&obj->userfault_link, &i915->mm.userfault_list); if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) @@ -327,7 +328,7 @@
vm_fault_t i915_gem_fault(struct vm_fault *vmf) err_reset: i915_reset_unlock(i915, srcu); err_rpm: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(rpm, wakeref); i915_gem_object_unpin_pages(obj); err: switch (ret) { @@ -410,7 +411,7 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj) * wakeref. */ lockdep_assert_held(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (!obj->userfault_count) goto out; @@ -427,7 +428,7 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj) wmb(); out: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); } static int create_mmap_offset(struct drm_i915_gem_object *obj) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index d02a1aff2058..4b6f14ecb270 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -181,7 +181,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, struct drm_i915_gem_object *obj, *on; intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); llist_for_each_entry_safe(obj, on, freed, freed) { struct i915_vma *vma, *vn; @@ -243,7 +243,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, cond_resched(); } - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); } void i915_gem_flush_free_objects(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index 70a4c9d3c098..961ca53c8976 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -180,7 +180,7 @@ i915_gem_shrink(struct drm_i915_private *i915, * we will force the wake during oom-notifier. 
*/ if (shrink & I915_SHRINK_BOUND) { - wakeref = intel_runtime_pm_get_if_in_use(i915); + wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm); if (!wakeref) shrink &= ~I915_SHRINK_BOUND; } @@ -265,7 +265,7 @@ i915_gem_shrink(struct drm_i915_private *i915, } if (shrink & I915_SHRINK_BOUND) - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); i915_retire_requests(i915); diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 73e667b31cc4..b74729b6f353 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -1754,7 +1754,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) return PTR_ERR(file); mutex_lock(&dev_priv->drm.struct_mutex); - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); ctx = live_context(dev_priv, file); if (IS_ERR(ctx)) { @@ -1768,7 +1768,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) err = i915_subtests(tests, ctx); out_unlock: - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); mutex_unlock(&dev_priv->drm.struct_mutex); mock_file_free(dev_priv, file); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c index c72e17da090c..8f22d3f18422 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c @@ -293,7 +293,7 @@ static int igt_gem_coherency(void *arg) values = offsets + ncachelines; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); for (over = igt_coherency_mode; over->name; over++) { if (!over->set) continue; @@ -371,7 +371,7 @@ static int igt_gem_coherency(void *arg) } } unlock: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); kfree(offsets); return err; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 74b0e5871c4b..84b83b215b3d 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -53,7 +53,7 @@ static int live_nop_switch(void *arg) return PTR_ERR(file); mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL); if (!ctx) { @@ -156,7 +156,7 @@ static int live_nop_switch(void *arg) } out_unlock: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); mock_file_free(i915, file); return err; @@ -1090,7 +1090,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915, goto out_unlock; } - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); ce = i915_gem_context_get_engine(ctx, RCS0); if (IS_ERR(ce)) { @@ -1130,7 +1130,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915, out_context: intel_context_put(ce); out_rpm: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); i915_gem_object_put(obj); out_unlock: @@ -1547,7 +1547,7 @@ static int igt_vm_isolation(void *arg) GEM_BUG_ON(ctx_b->vm->total != vm_total); vm_total -= I915_GTT_PAGE_SIZE; - wakeref = 
intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); count = 0; for_each_engine(engine, i915, id) { @@ -1592,7 +1592,7 @@ static int igt_vm_isolation(void *arg) count, RUNTIME_INFO(i915)->num_engines); out_rpm: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); out_unlock: if (igt_live_test_end(&t)) err = -EIO; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index b92809418729..5c81f4b4813a 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -205,7 +205,7 @@ static int igt_partial_tiling(void *arg) } mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (1) { IGT_TIMEOUT(end); @@ -316,7 +316,7 @@ next_tiling: ; } out_unlock: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); i915_gem_object_unpin_pages(obj); out: diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index c0d986db5a75..39220e16ea26 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -1103,7 +1103,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine) return true; /* If the whole device is asleep, the engine must be idle */ - wakeref = intel_runtime_pm_get_if_in_use(dev_priv); + wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); if (!wakeref) return true; @@ -1117,7 +1117,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine) !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE)) idle = false; - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return idle; } @@ -1531,10 +1531,10 @@ void intel_engine_dump(struct intel_engine_cs *engine, rcu_read_unlock(); - wakeref = intel_runtime_pm_get_if_in_use(engine->i915); + wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm); if (wakeref) { intel_engine_print_registers(engine, m); - intel_runtime_pm_put(engine->i915, wakeref); + intel_runtime_pm_put(&engine->i915->runtime_pm, wakeref); } else { drm_printf(m, "\tDevice is asleep; skipping register dump\n"); } diff --git a/drivers/gpu/drm/i915/gt/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c index 174bb0a60309..6bcfa6456c45 100644 --- a/drivers/gpu/drm/i915/gt/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/intel_hangcheck.c @@ -273,7 +273,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work) if (i915_terminally_wedged(dev_priv)) return; - wakeref = intel_runtime_pm_get_if_in_use(dev_priv); + wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); if (!wakeref) return; @@ -324,7 +324,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work) if (hung) hangcheck_declare_hang(dev_priv, hung, stuck); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); /* Reset timer in case GPU hangs without another request being added */ i915_queue_hangcheck(dev_priv); diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 60d24110af80..2009b1b57468 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -1310,7 +1310,7 @@ void i915_handle_error(struct drm_i915_private *i915, * isn't the case at least when we get here by doing a * simulated 
reset via debugfs, so get an RPM reference. */ - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); engine_mask &= INTEL_INFO(i915)->engine_mask; @@ -1373,7 +1373,7 @@ void i915_handle_error(struct drm_i915_private *i915, wake_up_all(&error->reset_queue); out: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); } int i915_reset_trylock(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 45379a63e013..b0b2998e56b8 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -394,7 +394,7 @@ static int igt_reset_nop(void *arg) } i915_gem_context_clear_bannable(ctx); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); reset_count = i915_reset_count(&i915->gpu_error); count = 0; do { @@ -441,7 +441,7 @@ static int igt_reset_nop(void *arg) err = igt_flush_test(i915, I915_WAIT_LOCKED); mutex_unlock(&i915->drm.struct_mutex); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); out: mock_file_free(i915, file); @@ -478,7 +478,7 @@ static int igt_reset_nop_engine(void *arg) } i915_gem_context_clear_bannable(ctx); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); for_each_engine(engine, i915, id) { unsigned int reset_count, reset_engine_count; unsigned int count; @@ -549,7 +549,7 @@ static int igt_reset_nop_engine(void *arg) err = igt_flush_test(i915, I915_WAIT_LOCKED); mutex_unlock(&i915->drm.struct_mutex); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); out: mock_file_free(i915, file); if (i915_reset_failed(i915)) @@ -1749,7 +1749,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) if (i915_terminally_wedged(i915)) return -EIO; /* we're long past hope of a successful reset */ - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck); drain_delayed_work(&i915->gpu_error.hangcheck_work); /* flush param */ @@ -1760,7 +1760,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) mutex_unlock(&i915->drm.struct_mutex); i915_modparams.enable_hangcheck = saved_hangcheck; - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); return err; } diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index f0ca2a09dabd..d84d31e3da19 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -33,7 +33,7 @@ static int live_sanitycheck(void *arg) return 0; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (igt_spinner_init(&spin, i915)) goto err_unlock; @@ -74,7 +74,7 @@ static int live_sanitycheck(void *arg) igt_spinner_fini(&spin); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -97,7 +97,7 @@ static int live_busywait_preempt(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); ctx_hi = kernel_context(i915); if (!ctx_hi) @@ -255,7 +255,7 @@ static int 
live_busywait_preempt(void *arg) err_unlock: if (igt_flush_test(i915, I915_WAIT_LOCKED)) err = -EIO; - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -277,7 +277,7 @@ static int live_preempt(void *arg) pr_err("Logical preemption supported, but not exposed\n"); mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (igt_spinner_init(&spin_hi, i915)) goto err_unlock; @@ -362,7 +362,7 @@ static int live_preempt(void *arg) igt_spinner_fini(&spin_hi); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -382,7 +382,7 @@ static int live_late_preempt(void *arg) return 0; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (igt_spinner_init(&spin_hi, i915)) goto err_unlock; @@ -466,7 +466,7 @@ static int live_late_preempt(void *arg) igt_spinner_fini(&spin_hi); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -532,7 +532,7 @@ static int live_suppress_self_preempt(void *arg) return 0; /* presume black blox */ mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (preempt_client_init(i915, &a)) goto err_unlock; @@ -606,7 +606,7 @@ static int live_suppress_self_preempt(void *arg) err_unlock: if (igt_flush_test(i915, I915_WAIT_LOCKED)) err = -EIO; - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -683,7 +683,7 @@ static int live_suppress_wait_preempt(void *arg) return 0; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (preempt_client_init(i915, &client[0])) /* ELSP[0] */ goto err_unlock; @@ -776,7 +776,7 @@ static int live_suppress_wait_preempt(void *arg) err_unlock: if (igt_flush_test(i915, I915_WAIT_LOCKED)) err = -EIO; - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -807,7 +807,7 @@ static int live_chain_preempt(void *arg) return 0; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (preempt_client_init(i915, &hi)) goto err_unlock; @@ -924,7 +924,7 @@ static int live_chain_preempt(void *arg) err_unlock: if (igt_flush_test(i915, I915_WAIT_LOCKED)) err = -EIO; - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -953,7 +953,7 @@ static int live_preempt_hang(void *arg) return 0; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (igt_spinner_init(&spin_hi, i915)) goto err_unlock; @@ -1050,7 +1050,7 @@ static int live_preempt_hang(void *arg) igt_spinner_fini(&spin_hi); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } 
@@ -1256,7 +1256,7 @@ static int live_preempt_smoke(void *arg) return -ENOMEM; mutex_lock(&smoke.i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(smoke.i915); + wakeref = intel_runtime_pm_get(&smoke.i915->runtime_pm); smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE); if (IS_ERR(smoke.batch)) { @@ -1309,7 +1309,7 @@ static int live_preempt_smoke(void *arg) err_batch: i915_gem_object_put(smoke.batch); err_unlock: - intel_runtime_pm_put(smoke.i915, wakeref); + intel_runtime_pm_put(&smoke.i915->runtime_pm, wakeref); mutex_unlock(&smoke.i915->drm.struct_mutex); kfree(smoke.contexts); diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c index 607473439eb0..91f0349882fc 100644 --- a/drivers/gpu/drm/i915/gt/selftest_reset.c +++ b/drivers/gpu/drm/i915/gt/selftest_reset.c @@ -42,14 +42,14 @@ static int igt_wedged_reset(void *arg) /* Check that we can recover a wedged device with a GPU reset */ igt_global_reset_lock(i915); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); i915_gem_set_wedged(i915); GEM_BUG_ON(!i915_reset_failed(i915)); i915_reset(i915, ALL_ENGINES, NULL); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); igt_global_reset_unlock(i915); return i915_reset_failed(i915) ? -EIO : 0; diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index c8d335d63f9c..f1d27348b3a7 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -642,7 +642,7 @@ static int live_dirty_whitelist(void *arg) if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */ return 0; - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); mutex_unlock(&i915->drm.struct_mutex); file = mock_file(i915); @@ -672,7 +672,7 @@ static int live_dirty_whitelist(void *arg) mock_file_free(i915, file); mutex_lock(&i915->drm.struct_mutex); out_rpm: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); return err; } @@ -1061,7 +1061,7 @@ live_gpu_reset_workarounds(void *arg) pr_info("Verifying after GPU reset...\n"); igt_global_reset_lock(i915); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); reference_lists_init(i915, &lists); @@ -1076,7 +1076,7 @@ live_gpu_reset_workarounds(void *arg) out: kernel_context_close(ctx); reference_lists_fini(i915, &lists); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); igt_global_reset_unlock(i915); return ok ? 
0 : -ESRCH; @@ -1103,7 +1103,7 @@ live_engine_reset_workarounds(void *arg) return PTR_ERR(ctx); igt_global_reset_lock(i915); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); reference_lists_init(i915, &lists); @@ -1160,7 +1160,7 @@ live_engine_reset_workarounds(void *arg) err: reference_lists_fini(i915, &lists); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); igt_global_reset_unlock(i915); kernel_context_close(ctx); diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index 7f4e3456ce11..b2080a99c5c2 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -169,7 +169,7 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu) if (WARN_ON(!vgpu_fence_sz(vgpu))) return; - intel_runtime_pm_get(dev_priv); + intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&dev_priv->drm.struct_mutex); _clear_vgpu_fence(vgpu); @@ -180,17 +180,18 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu) } mutex_unlock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); } static int alloc_vgpu_fence(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; struct drm_i915_private *dev_priv = gvt->dev_priv; + struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; struct drm_i915_fence_reg *reg; int i; - intel_runtime_pm_get(dev_priv); + intel_runtime_pm_get(rpm); /* Request fences from host */ mutex_lock(&dev_priv->drm.struct_mutex); @@ -206,7 +207,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu) _clear_vgpu_fence(vgpu); mutex_unlock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(rpm); return 0; out_free_fence: gvt_vgpu_err("Failed to alloc fences\n"); @@ -219,7 +220,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu) vgpu->fence.regs[i] = NULL; } mutex_unlock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(rpm); return -ENOSPC; } @@ -315,9 +316,9 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - intel_runtime_pm_get(dev_priv); + intel_runtime_pm_get(&dev_priv->runtime_pm); _clear_vgpu_fence(vgpu); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); } /** diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index b54f2bdc13a4..5fee2e112c55 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -584,12 +584,12 @@ enum { static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv) { - intel_runtime_pm_get(dev_priv); + intel_runtime_pm_get(&dev_priv->runtime_pm); } static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv) { - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); } /** diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 1c763a27a412..2369d4a9af94 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -465,7 +465,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) scheduler->current_vgpu = NULL; } - intel_runtime_pm_get(dev_priv); + intel_runtime_pm_get(&dev_priv->runtime_pm); spin_lock_bh(&scheduler->mmio_context_lock); for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) 
{ if (scheduler->engine_owner[ring_id] == vgpu) { @@ -474,6 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) } } spin_unlock_bh(&scheduler->mmio_context_lock); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); mutex_unlock(&vgpu->gvt->sched_lock); } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index e301efb18d45..9029de4b4651 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -1526,11 +1526,11 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, * as there is only one pre-allocated buf-obj for shadow. */ if (list_empty(workload_q_head(vgpu, ring_id))) { - intel_runtime_pm_get(dev_priv); + intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&dev_priv->drm.struct_mutex); ret = intel_gvt_scan_and_shadow_workload(workload); mutex_unlock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); } if (ret) { diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 46014444ad2d..c0304b685e7e 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -659,7 +659,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) intel_wakeref_t wakeref; int i, pipe; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); if (IS_CHERRYVIEW(dev_priv)) { intel_wakeref_t pref; @@ -865,7 +865,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) } } - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -1004,7 +1004,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) intel_wakeref_t wakeref; int ret = 0; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); if (IS_GEN(dev_priv, 5)) { u16 rgvswctl = I915_READ16(MEMSWCTL); @@ -1216,7 +1216,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq); seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return ret; } @@ -1561,7 +1561,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused) if (!HAS_FBC(dev_priv)) return -ENODEV; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&fbc->lock); if (intel_fbc_is_active(dev_priv)) @@ -1588,7 +1588,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused) } mutex_unlock(&fbc->lock); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -1638,7 +1638,7 @@ static int i915_ips_status(struct seq_file *m, void *unused) if (!HAS_IPS(dev_priv)) return -ENODEV; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); seq_printf(m, "Enabled by kernel parameter: %s\n", yesno(i915_modparams.enable_ips)); @@ -1652,7 +1652,7 @@ static int i915_ips_status(struct seq_file *m, void *unused) seq_puts(m, "Currently: disabled\n"); } - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -1731,7 +1731,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) seq_puts(m, "GPU freq 
(MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { ia_freq = gpu_freq; sandybridge_pcode_read(dev_priv, @@ -1745,7 +1745,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) ((ia_freq >> 0) & 0xff) * 100, ((ia_freq >> 8) & 0xff) * 100); } - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -1921,7 +1921,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data) struct drm_i915_private *dev_priv = node_to_i915(m->private); intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); seq_printf(m, "bit6 swizzle for X-tiling = %s\n", swizzle_string(dev_priv->mm.bit_6_swizzle_x)); @@ -1959,7 +1959,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data) if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) seq_puts(m, "L-shaped memory detected\n"); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -2472,7 +2472,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) if (!psr->sink_support) return 0; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&psr->lock); if (psr->enabled) @@ -2536,7 +2536,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) unlock: mutex_unlock(&psr->lock); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -2553,11 +2553,11 @@ i915_edp_psr_debug_set(void *data, u64 val) DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val); - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); ret = intel_psr_debug_set(dev_priv, val); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return ret; } @@ -2673,7 +2673,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused) csr = &dev_priv->csr; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL)); seq_printf(m, "path: %s\n", csr->fw_path); @@ -2699,7 +2699,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused) seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE)); seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL)); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -2983,7 +2983,7 @@ static int i915_display_info(struct seq_file *m, void *unused) struct drm_connector_list_iter conn_iter; intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); seq_printf(m, "CRTC info\n"); seq_printf(m, "---------\n"); @@ -3032,7 +3032,7 @@ static int i915_display_info(struct seq_file *m, void *unused) drm_connector_list_iter_end(&conn_iter); mutex_unlock(&dev->mode_config.mutex); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -3045,7 +3045,7 @@ static int i915_engine_info(struct seq_file *m, void *unused) enum intel_engine_id id; struct drm_printer p; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); seq_printf(m, "GT awake? 
%s [%d]\n", yesno(dev_priv->gt.awake), @@ -3057,7 +3057,7 @@ static int i915_engine_info(struct seq_file *m, void *unused) for_each_engine(engine, dev_priv, id) intel_engine_dump(engine, &p, "%s\n", engine->name); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -4220,7 +4220,8 @@ static int i915_forcewake_open(struct inode *inode, struct file *file) if (INTEL_GEN(i915) < 6) return 0; - file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915); + file->private_data = + (void *)(uintptr_t)intel_runtime_pm_get(&i915->runtime_pm); intel_uncore_forcewake_user_get(&i915->uncore); return 0; @@ -4234,7 +4235,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file) return 0; intel_uncore_forcewake_user_put(&i915->uncore); - intel_runtime_pm_put(i915, + intel_runtime_pm_put(&i915->runtime_pm, (intel_wakeref_t)(uintptr_t)file->private_data); return 0; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e980c1ee3dcf..2b5f546fbcb2 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -373,7 +373,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, if (ret) return ret; - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE | PIN_NONFAULT | @@ -460,7 +460,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, i915_vma_unpin(vma); } out_unlock: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return ret; @@ -560,6 +560,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_ggtt *ggtt = &i915->ggtt; + struct intel_runtime_pm *rpm = &i915->runtime_pm; intel_wakeref_t wakeref; struct drm_mm_node node; struct dma_fence *fence; @@ -580,14 +581,14 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, * This easily dwarfs any performance advantage from * using the cache bypass of indirect GGTT access. 
*/ - wakeref = intel_runtime_pm_get_if_in_use(i915); + wakeref = intel_runtime_pm_get_if_in_use(rpm); if (!wakeref) { ret = -EFAULT; goto out_unlock; } } else { /* No backing pages, no fallback, we must force GGTT access */ - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(rpm); } vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, @@ -683,7 +684,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, i915_vma_unpin(vma); } out_rpm: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(rpm, wakeref); out_unlock: mutex_unlock(&i915->drm.struct_mutex); return ret; @@ -1173,7 +1174,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915) GEM_TRACE("\n"); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); /* @@ -1196,7 +1197,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915) intel_gt_sanitize(i915, false); intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_lock(&i915->drm.struct_mutex); i915_gem_contexts_lost(i915); diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index f2cef3fa3deb..0174a5ceda13 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -269,7 +269,7 @@ static int fence_update(struct drm_i915_fence_reg *fence, * be cleared before we can use any other fences to ensure that * the new fences do not overlap the elided clears, confusing HW. */ - wakeref = intel_runtime_pm_get_if_in_use(fence->i915); + wakeref = intel_runtime_pm_get_if_in_use(&fence->i915->runtime_pm); if (!wakeref) { GEM_BUG_ON(vma); return 0; @@ -283,7 +283,7 @@ static int fence_update(struct drm_i915_fence_reg *fence, list_move_tail(&fence->link, &fence->i915->mm.fence_list); } - intel_runtime_pm_put(fence->i915, wakeref); + intel_runtime_pm_put(&fence->i915->runtime_pm, wakeref); return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index e70675bfb51d..cf853d3ae58a 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1825,7 +1825,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, unsigned int pde; bool flush = false; - wakeref = intel_runtime_pm_get(vm->i915); + wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); spin_lock(&ppgtt->base.pd.lock); gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) { @@ -1868,12 +1868,12 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, gen6_ggtt_invalidate(vm->i915); } - intel_runtime_pm_put(vm->i915, wakeref); + intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); return 0; unwind_out: - intel_runtime_pm_put(vm->i915, wakeref); + intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); gen6_ppgtt_clear_range(vm, from, start - from); return -ENOMEM; } diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index d92ddfada262..3d8162d28730 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1375,7 +1375,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream) free_oa_buffer(dev_priv); intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); - intel_runtime_pm_put(dev_priv, stream->wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref); if (stream->ctx) oa_put_render_ctx_id(stream); @@ -2112,7 +2112,7 @@ static int 
i915_oa_stream_init(struct i915_perf_stream *stream, * In our case we are expecting that taking pm + FORCEWAKE * references will effectively disable RC6. */ - stream->wakeref = intel_runtime_pm_get(dev_priv); + stream->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); ret = alloc_oa_buffer(dev_priv); @@ -2148,7 +2148,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, put_oa_config(dev_priv, stream->oa_config); intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); - intel_runtime_pm_put(dev_priv, stream->wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref); err_config: if (stream->ctx) diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 1ccda0ee4ff5..8f105d441a52 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -171,7 +171,7 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) wakeref = 0; if (READ_ONCE(dev_priv->gt.awake)) - wakeref = intel_runtime_pm_get_if_in_use(dev_priv); + wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); if (!wakeref) return; @@ -207,7 +207,7 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) } spin_unlock_irqrestore(&dev_priv->uncore.lock, flags); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); } static void @@ -441,14 +441,15 @@ static u64 __get_rc6(struct drm_i915_private *i915) static u64 get_rc6(struct drm_i915_private *i915) { #if IS_ENABLED(CONFIG_PM) + struct intel_runtime_pm *rpm = &i915->runtime_pm; intel_wakeref_t wakeref; unsigned long flags; u64 val; - wakeref = intel_runtime_pm_get_if_in_use(i915); + wakeref = intel_runtime_pm_get_if_in_use(rpm); if (wakeref) { val = __get_rc6(i915); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(rpm, wakeref); /* * If we are coming back from being runtime suspended we must @@ -467,8 +468,7 @@ static u64 get_rc6(struct drm_i915_private *i915) spin_unlock_irqrestore(&i915->pmu.lock, flags); } else { - struct pci_dev *pdev = i915->drm.pdev; - struct device *kdev = &pdev->dev; + struct device *kdev = rpm->kdev; /* * We are runtime suspended. 
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 3ef07b987d40..75acbf686ec9 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -264,7 +264,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, intel_wakeref_t wakeref; u32 freq; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { vlv_punit_get(dev_priv); @@ -276,7 +276,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1)); } - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(dev_priv, freq)); } @@ -364,7 +364,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, if (ret) return ret; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&rps->lock); val = intel_freq_opcode(dev_priv, val); @@ -392,7 +392,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, unlock: mutex_unlock(&rps->lock); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return ret ?: count; } @@ -420,7 +420,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, if (ret) return ret; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&rps->lock); val = intel_freq_opcode(dev_priv, val); @@ -444,7 +444,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, unlock: mutex_unlock(&rps->lock); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return ret ?: count; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 62fa573f90e8..199ee5ca3ea3 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2112,7 +2112,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, * intel_runtime_pm_put(), so it is correct to wrap only the * pin/unpin/fence and not more. 
*/ - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); i915_gem_object_lock(obj); atomic_inc(&dev_priv->gpu_error.pending_fb_pin); @@ -2169,7 +2169,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, atomic_dec(&dev_priv->gpu_error.pending_fb_pin); i915_gem_object_unlock(obj); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return vma; } @@ -13854,7 +13854,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref); } - intel_runtime_pm_put(dev_priv, intel_state->wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref); /* * Defer the cleanup of the old state to a separate worker to not @@ -13933,7 +13933,7 @@ static int intel_atomic_commit(struct drm_device *dev, struct drm_i915_private *dev_priv = to_i915(dev); int ret = 0; - intel_state->wakeref = intel_runtime_pm_get(dev_priv); + intel_state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); drm_atomic_state_get(state); i915_sw_fence_init(&intel_state->commit_ready, @@ -13971,7 +13971,7 @@ static int intel_atomic_commit(struct drm_device *dev, if (ret) { DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); i915_sw_fence_commit(&intel_state->commit_ready); - intel_runtime_pm_put(dev_priv, intel_state->wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref); return ret; } @@ -13983,7 +13983,7 @@ static int intel_atomic_commit(struct drm_device *dev, i915_sw_fence_commit(&intel_state->commit_ready); drm_atomic_helper_cleanup_planes(dev, state); - intel_runtime_pm_put(dev_priv, intel_state->wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref); return ret; } dev_priv->wm.distrust_bios_wm = false; diff --git a/drivers/gpu/drm/i915/intel_display_power.c b/drivers/gpu/drm/i915/intel_display_power.c index 2f9f4e1d5884..cbd862fbfd76 100644 --- a/drivers/gpu/drm/i915/intel_display_power.c +++ b/drivers/gpu/drm/i915/intel_display_power.c @@ -1644,7 +1644,7 @@ intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv, goto out_verify; cancel_delayed_work(&power_domains->async_put_work); - intel_runtime_pm_put_raw(dev_priv, + intel_runtime_pm_put_raw(&dev_priv->runtime_pm, fetch_and_zero(&power_domains->async_put_wakeref)); out_verify: verify_async_put_domains_state(power_domains); @@ -1684,7 +1684,7 @@ intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { struct i915_power_domains *power_domains = &dev_priv->power_domains; - intel_wakeref_t wakeref = intel_runtime_pm_get(dev_priv); + intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&power_domains->lock); __intel_display_power_get_domain(dev_priv, domain); @@ -1713,7 +1713,7 @@ intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, intel_wakeref_t wakeref; bool is_enabled; - wakeref = intel_runtime_pm_get_if_in_use(dev_priv); + wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); if (!wakeref) return false; @@ -1729,7 +1729,7 @@ intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, mutex_unlock(&power_domains->lock); if (!is_enabled) { - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); wakeref = 0; } @@ -1786,7 +1786,7 @@ void intel_display_power_put_unchecked(struct 
drm_i915_private *dev_priv, enum intel_display_power_domain domain) { __intel_display_power_put(dev_priv, domain); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); } static void @@ -1806,6 +1806,7 @@ release_async_put_domains(struct i915_power_domains *power_domains, u64 mask) struct drm_i915_private *dev_priv = container_of(power_domains, struct drm_i915_private, power_domains); + struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; enum intel_display_power_domain domain; intel_wakeref_t wakeref; @@ -1814,8 +1815,8 @@ release_async_put_domains(struct i915_power_domains *power_domains, u64 mask) * wakeref to make the state checker happy about the HW access during * power well disabling. */ - assert_rpm_raw_wakeref_held(&dev_priv->runtime_pm); - wakeref = intel_runtime_pm_get(dev_priv); + assert_rpm_raw_wakeref_held(rpm); + wakeref = intel_runtime_pm_get(rpm); for_each_power_domain(domain, mask) { /* Clear before put, so put's sanity check is happy. */ @@ -1823,7 +1824,7 @@ release_async_put_domains(struct i915_power_domains *power_domains, u64 mask) __intel_display_power_put_domain(dev_priv, domain); } - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(rpm, wakeref); } static void @@ -1833,7 +1834,8 @@ intel_display_power_put_async_work(struct work_struct *work) container_of(work, struct drm_i915_private, power_domains.async_put_work.work); struct i915_power_domains *power_domains = &dev_priv->power_domains; - intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(dev_priv); + struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; + intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm); intel_wakeref_t old_work_wakeref = 0; mutex_lock(&power_domains->lock); @@ -1863,9 +1865,9 @@ intel_display_power_put_async_work(struct work_struct *work) mutex_unlock(&power_domains->lock); if (old_work_wakeref) - intel_runtime_pm_put_raw(dev_priv, old_work_wakeref); + intel_runtime_pm_put_raw(rpm, old_work_wakeref); if (new_work_wakeref) - intel_runtime_pm_put_raw(dev_priv, new_work_wakeref); + intel_runtime_pm_put_raw(rpm, new_work_wakeref); } /** @@ -1883,7 +1885,8 @@ void __intel_display_power_put_async(struct drm_i915_private *i915, intel_wakeref_t wakeref) { struct i915_power_domains *power_domains = &i915->power_domains; - intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(i915); + struct intel_runtime_pm *rpm = &i915->runtime_pm; + intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm); mutex_lock(&power_domains->lock); @@ -1910,9 +1913,9 @@ void __intel_display_power_put_async(struct drm_i915_private *i915, mutex_unlock(&power_domains->lock); if (work_wakeref) - intel_runtime_pm_put_raw(i915, work_wakeref); + intel_runtime_pm_put_raw(rpm, work_wakeref); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(rpm, wakeref); } /** @@ -1948,7 +1951,7 @@ void intel_display_power_flush_work(struct drm_i915_private *i915) mutex_unlock(&power_domains->lock); if (work_wakeref) - intel_runtime_pm_put_raw(i915, work_wakeref); + intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref); } /** @@ -1987,7 +1990,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, intel_wakeref_t wakeref) { __intel_display_power_put(dev_priv, domain); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); } #endif @@ -4399,7 +4402,7 @@ void intel_power_domains_fini_hw(struct drm_i915_private *i915) intel_power_domains_verify_state(i915); /* Keep the power well enabled, 
but cancel its rpm wakeref.
 	 */
-	intel_runtime_pm_put(i915, wakeref);
+	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }
 /**
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 0d3a6fa674e6..1edd44ee32b2 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -213,7 +213,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	}
 	mutex_lock(&dev->struct_mutex);
-	wakeref = intel_runtime_pm_get(dev_priv);
+	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 	/* Pin the GGTT vma for our access via info->screen_base.
 	 * This also validates that any existing fb inherited from the
@@ -272,7 +272,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	ifbdev->vma = vma;
 	ifbdev->vma_flags = flags;
-	intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 	mutex_unlock(&dev->struct_mutex);
 	vga_switcheroo_client_fb_set(pdev, info);
 	return 0;
@@ -280,7 +280,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 out_unpin:
 	intel_unpin_fb_vma(vma, flags);
 out_unlock:
-	intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index ff9eb3c855d3..ea3de4acc850 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -230,7 +230,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
 	intel_wakeref_t wakeref;
 	enum hpd_pin pin;
-	wakeref = intel_runtime_pm_get(dev_priv);
+	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 	spin_lock_irq(&dev_priv->irq_lock);
 	for_each_hpd_pin(pin) {
@@ -263,7 +263,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
 		dev_priv->display.hpd_irq_setup(dev_priv);
 	spin_unlock_irq(&dev_priv->irq_lock);
-	intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 }
 bool intel_encoder_hotplug(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 3d9ea3498679..502c54428570 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -369,7 +369,7 @@ static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
 /**
  * intel_runtime_pm_get_raw - grab a raw runtime pm reference
- * @i915: i915 device instance
+ * @rpm: the intel_runtime_pm structure
  *
  * This is the unlocked version of intel_display_power_is_enabled() and should
  * only be used from error capture and recovery code where deadlocks are
@@ -384,15 +384,14 @@ static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
  * Returns: the wakeref cookie to pass to intel_runtime_pm_put_raw(), evaluates
  * as True if the wakeref was acquired, or False otherwise.
  */
-
-intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915)
+intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm)
 {
-	return __intel_runtime_pm_get(&i915->runtime_pm, false);
+	return __intel_runtime_pm_get(rpm, false);
 }
 /**
  * intel_runtime_pm_get - grab a runtime pm reference
- * @i915: i915 device instance
+ * @rpm: the intel_runtime_pm structure
  *
  * This function grabs a device-level runtime pm reference (mostly used for GEM
  * code to ensure the GTT or GT is on) and ensures that it is powered up.
@@ -402,14 +401,14 @@ intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915)
  *
  * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
  */
-intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
+intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
 {
-	return __intel_runtime_pm_get(&i915->runtime_pm, true);
+	return __intel_runtime_pm_get(rpm, true);
 }
 /**
  * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
- * @i915: i915 device instance
+ * @rpm: the intel_runtime_pm structure
  *
  * This function grabs a device-level runtime pm reference if the device is
 * already in use and ensures that it is powered up. It is illegal to try
@@ -421,10 +420,8 @@ intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
  * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
  * as True if the wakeref was acquired, or False otherwise.
  */
-intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
+intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
 {
-	struct intel_runtime_pm *rpm = &i915->runtime_pm;
-
 	if (IS_ENABLED(CONFIG_PM)) {
 		/*
 		 * In cases runtime PM is disabled by the RPM core and we get
@@ -443,7 +440,7 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
 /**
  * intel_runtime_pm_get_noresume - grab a runtime pm reference
- * @i915: i915 device instance
+ * @rpm: the intel_runtime_pm structure
  *
  * This function grabs a device-level runtime pm reference (mostly used for GEM
  * code to ensure the GTT or GT is on).
@@ -460,10 +457,8 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
  *
  * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
  */
-intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
+intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm)
 {
-	struct intel_runtime_pm *rpm = &i915->runtime_pm;
-
 	assert_rpm_wakelock_held(rpm);
 	pm_runtime_get_noresume(rpm->kdev);
@@ -488,7 +483,7 @@ static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm,
 /**
  * intel_runtime_pm_put_raw - release a raw runtime pm reference
- * @i915: i915 device instance
+ * @rpm: the intel_runtime_pm structure
  * @wref: wakeref acquired for the reference that is being released
  *
  * This function drops the device-level runtime pm reference obtained by
@@ -496,14 +491,14 @@ static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm,
  * hardware block right away if this is the last reference.
  */
 void
-intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref)
+intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
 {
-	__intel_runtime_pm_put(&i915->runtime_pm, wref, false);
+	__intel_runtime_pm_put(rpm, wref, false);
 }
 /**
  * intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference
- * @i915: i915 device instance
+ * @rpm: the intel_runtime_pm structure
  *
  * This function drops the device-level runtime pm reference obtained by
  * intel_runtime_pm_get() and might power down the corresponding
@@ -513,24 +508,24 @@ intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref)
  * new code, as the correctness of its use cannot be checked. Always use
  * intel_runtime_pm_put() instead.
  */
-void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915)
+void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm)
 {
-	__intel_runtime_pm_put(&i915->runtime_pm, -1, true);
+	__intel_runtime_pm_put(rpm, -1, true);
 }
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
 /**
  * intel_runtime_pm_put - release a runtime pm reference
- * @i915: i915 device instance
+ * @rpm: the intel_runtime_pm structure
  * @wref: wakeref acquired for the reference that is being released
  *
  * This function drops the device-level runtime pm reference obtained by
  * intel_runtime_pm_get() and might power down the corresponding
  * hardware block right away if this is the last reference.
  */
-void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
+void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
 {
-	__intel_runtime_pm_put(&i915->runtime_pm, wref, true);
+	__intel_runtime_pm_put(rpm, wref, true);
 }
 #endif
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index 0890e698f196..f6445ca5bbf1 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -174,30 +174,30 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm);
 void intel_runtime_pm_disable(struct intel_runtime_pm *rpm);
 void intel_runtime_pm_cleanup(struct intel_runtime_pm *rpm);
-intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915);
-intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915);
-intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915);
-intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915);
+intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
+intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
+intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm);
+intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);
 #define with_intel_runtime_pm(i915, wf) \
-	for ((wf) = intel_runtime_pm_get(i915); (wf); \
-	     intel_runtime_pm_put((i915), (wf)), (wf) = 0)
+	for ((wf) = intel_runtime_pm_get(&(i915)->runtime_pm); (wf); \
+	     intel_runtime_pm_put(&(i915)->runtime_pm, (wf)), (wf) = 0)
 #define with_intel_runtime_pm_if_in_use(i915, wf) \
-	for ((wf) = intel_runtime_pm_get_if_in_use(i915); (wf); \
-	     intel_runtime_pm_put((i915), (wf)), (wf) = 0)
+	for ((wf) = intel_runtime_pm_get_if_in_use(&(i915)->runtime_pm); (wf); \
+	     intel_runtime_pm_put(&(i915)->runtime_pm, (wf)), (wf) = 0)
-void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915);
+void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm);
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref);
+void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
 #else
 static inline void
-intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
+intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
 {
-	intel_runtime_pm_put_unchecked(i915);
+	intel_runtime_pm_put_unchecked(rpm);
 }
 #endif
-void intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref);
+void intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
 void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index b6c7167ce154..b677ae893d6f 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -9,14 +9,14 @@
 static void rpm_get(struct drm_i915_private *i915, struct intel_wakeref *wf)
 {
-	wf->wakeref = intel_runtime_pm_get(i915);
+	wf->wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 }
 static void rpm_put(struct drm_i915_private *i915, struct intel_wakeref *wf)
 {
 	intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);
-	intel_runtime_pm_put(i915, wakeref);
+	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	GEM_BUG_ON(!wakeref);
 }
@@ -86,7 +86,7 @@ static void wakeref_auto_timeout(struct timer_list *t)
 	wakeref = fetch_and_zero(&wf->wakeref);
 	spin_unlock_irqrestore(&wf->lock, flags);
-	intel_runtime_pm_put(wf->i915, wakeref);
+	intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref);
 }
 void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
@@ -116,7 +116,7 @@ void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
 	spin_lock_irqsave(&wf->lock, flags);
 	if (!refcount_inc_not_zero(&wf->count)) {
 		GEM_BUG_ON(wf->wakeref);
-		wf->wakeref = intel_runtime_pm_get_if_in_use(wf->i915);
+		wf->wakeref = intel_runtime_pm_get_if_in_use(&wf->i915->runtime_pm);
 		refcount_set(&wf->count, 1);
 	}
 	spin_unlock_irqrestore(&wf->lock, flags);
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index cc1ca4be1a00..c0b3537a5fa6 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -97,7 +97,7 @@ static int live_active_wait(void *arg)
 	/* Check that we get a callback when requests retire upon waiting */
 	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(i915);
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	err = __live_active_setup(i915, &active);
@@ -111,7 +111,7 @@ static int live_active_wait(void *arg)
 	if (igt_flush_test(i915, I915_WAIT_LOCKED))
 		err = -EIO;
-	intel_runtime_pm_put(i915, wakeref);
+	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }
@@ -126,7 +126,7 @@ static int live_active_retire(void *arg)
 	/* Check that we get a callback when requests are indirectly retired */
 	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(i915);
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	err = __live_active_setup(i915, &active);
@@ -140,7 +140,7 @@ static int live_active_retire(void *arg)
 	}
 	i915_active_fini(&active.base);
-	intel_runtime_pm_put(i915, wakeref);
+	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 83643929416c..23a54da47ca5 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -63,7 +63,7 @@ static void simulate_hibernate(struct drm_i915_private *i915)
 {
 	intel_wakeref_t wakeref;
-	wakeref = intel_runtime_pm_get(i915);
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	/*
 	 * As a final sting in the tail, invalidate stolen. Under a real S4,
@@ -74,7 +74,7 @@ static void simulate_hibernate(struct drm_i915_private *i915)
 	 */
 	trash_stolen(i915);
-	intel_runtime_pm_put(i915, wakeref);
+	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }
 static int pm_prepare(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 1d8235303edf..5a8bee3a1d90 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -400,7 +400,7 @@ static int igt_evict_contexts(void *arg)
 		return 0;
 	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(i915);
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	/* Reserve a block so that we know we have enough to fit a few rq */
 	memset(&hole, 0, sizeof(hole));
@@ -511,7 +511,7 @@ static int igt_evict_contexts(void *arg)
 	}
 	if (drm_mm_node_allocated(&hole))
 		drm_mm_remove_node(&hole);
-	intel_runtime_pm_put(i915, wakeref);
+	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index a557e77d9c54..df75df9b52fc 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -295,9 +295,9 @@ static int lowlevel_hole(struct drm_i915_private *i915,
 			mock_vma.node.size = BIT_ULL(size);
 			mock_vma.node.start = addr;
-			wakeref = intel_runtime_pm_get(i915);
+			wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 			vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
-			intel_runtime_pm_put(i915, wakeref);
+			intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 		}
 		count = n;
@@ -1171,7 +1171,7 @@ static int igt_ggtt_page(void *arg)
 	if (err)
 		goto out_unpin;
-	wakeref = intel_runtime_pm_get(i915);
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	for (n = 0; n < count; n++) {
 		u64 offset = tmp.start + n * PAGE_SIZE;
@@ -1218,7 +1218,7 @@ static int igt_ggtt_page(void *arg)
 	kfree(order);
 out_remove:
 	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
-	intel_runtime_pm_put(i915, wakeref);
+	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	drm_mm_remove_node(&tmp);
 out_unpin:
 	i915_gem_object_unpin_pages(obj);
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 11278bac3a24..dd4e72eafc6c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -537,7 +537,7 @@ static int live_nop_request(void *arg)
 	 */
 	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(i915);
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	for_each_engine(engine, i915, id) {
 		struct i915_request *request = NULL;
@@ -597,7 +597,7 @@ static int live_nop_request(void *arg)
 	}
 out_unlock:
-	intel_runtime_pm_put(i915, wakeref);
+	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }
@@ -682,7 +682,7 @@ static int live_empty_request(void *arg)
 	 */
 	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(i915);
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	batch = empty_batch(i915);
 	if (IS_ERR(batch)) {
@@ -746,7 +746,7 @@ static int live_empty_request(void *arg)
 	i915_vma_unpin(batch);
 	i915_vma_put(batch);
 out_unlock:
-	intel_runtime_pm_put(i915, wakeref);
+	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }
@@ -839,7 +839,7 @@ static int live_all_engines(void *arg)
 	 */
 	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(i915);
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	err = igt_live_test_begin(&t, i915, __func__, "");
 	if (err)
@@ -919,7 +919,7 @@ static int live_all_engines(void *arg)
 	i915_vma_unpin(batch);
 	i915_vma_put(batch);
 out_unlock:
-	intel_runtime_pm_put(i915, wakeref);
+	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }
@@ -942,7 +942,7 @@ static int live_sequential_engines(void *arg)
 	 */
 	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(i915);
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	err = igt_live_test_begin(&t, i915, __func__, "");
 	if (err)
@@ -1048,7 +1048,7 @@ static int live_sequential_engines(void *arg)
 		i915_request_put(request[id]);
 	}
out_unlock:
-	intel_runtime_pm_put(i915, wakeref);
+	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 }
@@ -1113,7 +1113,7 @@ static int live_breadcrumbs_smoketest(void *arg)
 	 * On real hardware this time.
 	 */
-	wakeref = intel_runtime_pm_get(i915);
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	file = mock_file(i915);
 	if (IS_ERR(file)) {
@@ -1220,7 +1220,7 @@ static int live_breadcrumbs_smoketest(void *arg)
 out_file:
 	mock_file_free(i915, file);
 out_rpm:
-	intel_runtime_pm_put(i915, wakeref);
+	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c
index acb2cc5136b7..724bf3650b3e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c
@@ -515,7 +515,7 @@ static int live_hwsp_engine(void *arg)
 		return -ENOMEM;
 	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(i915);
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	count = 0;
 	for_each_engine(engine, i915, id) {
@@ -558,7 +558,7 @@ static int live_hwsp_engine(void *arg)
 		i915_timeline_put(tl);
 	}
-	intel_runtime_pm_put(i915, wakeref);
+	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 	kvfree(timelines);
@@ -591,7 +591,7 @@ static int live_hwsp_alternate(void *arg)
 		return -ENOMEM;
 	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(i915);
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	count = 0;
 	for (n = 0; n < NUM_TIMELINES; n++) {
@@ -634,7 +634,7 @@ static int live_hwsp_alternate(void *arg)
 		i915_timeline_put(tl);
 	}
-	intel_runtime_pm_put(i915, wakeref);
+	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 	kvfree(timelines);
@@ -658,7 +658,7 @@ static int live_hwsp_wrap(void *arg)
 	 */
 	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(i915);
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	tl = i915_timeline_create(i915, NULL);
 	if (IS_ERR(tl)) {
@@ -749,7 +749,7 @@ static int live_hwsp_wrap(void *arg)
 out_free:
 	i915_timeline_put(tl);
 out_rpm:
-	intel_runtime_pm_put(i915, wakeref);
+	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
@@ -771,7 +771,7 @@ static int live_hwsp_recycle(void *arg)
 	 */
 	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(i915);
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	count = 0;
 	for_each_engine(engine, i915, id) {
@@ -825,7 +825,7 @@ static int live_hwsp_recycle(void *arg)
out:
 	if (igt_flush_test(i915, I915_WAIT_LOCKED))
 		err = -EIO;
-	intel_runtime_pm_put(i915, wakeref);
+	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index a166d9405a94..fbc79b14823a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -873,7 +873,7 @@ static int igt_vma_remapped_gtt(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(i915);
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	for (t = types; *t; t++) {
 		for (p = planes; p->width; p++) {
@@ -965,7 +965,7 @@ static int igt_vma_remapped_gtt(void *arg)
 	}
out:
-	intel_runtime_pm_put(i915, wakeref);
+	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 	i915_gem_object_put(obj);
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index 7fd0321e0947..6ca8584cd64c 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -144,7 +144,7 @@ static int igt_guc_clients(void *args)
 	GEM_BUG_ON(!HAS_GUC(dev_priv));
 	mutex_lock(&dev_priv->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(dev_priv);
+	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 	guc = &dev_priv->guc;
 	if (!guc) {
@@ -227,7 +227,7 @@ static int igt_guc_clients(void *args)
 	guc_clients_create(guc);
 	guc_clients_enable(guc);
unlock:
-	intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	return err;
 }
@@ -247,7 +247,7 @@ static int igt_guc_doorbells(void *arg)
 	GEM_BUG_ON(!HAS_GUC(dev_priv));
 	mutex_lock(&dev_priv->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(dev_priv);
+	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 	guc = &dev_priv->guc;
 	if (!guc) {
@@ -340,7 +340,7 @@ static int igt_guc_doorbells(void *arg)
 			guc_client_free(clients[i]);
 	}
unlock:
-	intel_runtime_pm_put(dev_priv, wakeref);
+	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index e0d7ebecb215..86815c6072a1 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -176,7 +176,7 @@ static int live_forcewake_ops(void *arg)
 		return 0;
 	}
-	wakeref = intel_runtime_pm_get(i915);
+	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	for_each_fw_domain(domain, uncore, tmp) {
 		smp_store_mb(domain->active, false);
@@ -247,7 +247,7 @@ static int live_forcewake_ops(void *arg)
 	}
out_rpm:
-	intel_runtime_pm_put(i915, wakeref);
+	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	return err;
 }
-- 
2.20.1
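
For anyone following the conversion, the caller-side pattern is mechanical: every former intel_runtime_pm_get(i915) / intel_runtime_pm_put(i915, wakeref) pair now passes &i915->runtime_pm, while the with_intel_runtime_pm() macros keep taking the i915 pointer and dereference ->runtime_pm internally. Below is a minimal illustrative sketch, not part of the patch; the function name and the hardware-access placeholders are invented for the example, and it assumes the usual in-tree headers:

	/* Illustrative sketch only -- not part of the patch. */
	#include "i915_drv.h"
	#include "intel_runtime_pm.h"

	static void example_touch_hw(struct drm_i915_private *i915)
	{
		struct intel_runtime_pm *rpm = &i915->runtime_pm; /* new parameter type */
		intel_wakeref_t wakeref;

		/* Explicit form: the wakeref cookie must be handed back on release. */
		wakeref = intel_runtime_pm_get(rpm);
		/* ... access the hardware while it is guaranteed to be awake ... */
		intel_runtime_pm_put(rpm, wakeref);

		/* Scoped form: the macro still takes the i915 pointer and does the
		 * &(i915)->runtime_pm dereference itself, so its users need no change.
		 */
		with_intel_runtime_pm(i915, wakeref) {
			/* ... hardware access ... */
		}
	}

Keeping the with_intel_runtime_pm() wrappers on the i915 pointer is what confines the churn of this patch to the direct callers of the get/put functions.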