Chris Wilson <chris@xxxxxxxxxxxxxxxxxx> writes: > Currently the timer is armed for 1ms after the first use and is killed > immediately, dropping the forcewake as early as possible. However, for > very frequent operations the forcewake dance has a large impact on > latency and keeping the timer alive until we are idle is preferred. To > achieve this, if we call intel_uncore_forcewake_get whilst the timer is > alive (repeated use), then set a flag to restart the timer on expiry > rather than drop the forcewake usage count. The timer is racy, the > consequence of the race is to expire the timer earlier than is now > desired but does not impact on correct behaviour. To offset the race > slightly, we set the active flag again on intel_uncore_forcewake_put. > > Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx> > Cc: Mika Kuoppala <mika.kuoppala@xxxxxxxxx> > Cc: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx> > --- > drivers/gpu/drm/i915/intel_uncore.c | 15 ++++++++++++--- > drivers/gpu/drm/i915/intel_uncore.h | 1 + > 2 files changed, 13 insertions(+), 3 deletions(-) > > diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c > index 7eaa592aed26..2fd0989805eb 100644 > --- a/drivers/gpu/drm/i915/intel_uncore.c > +++ b/drivers/gpu/drm/i915/intel_uncore.c > @@ -216,6 +216,9 @@ intel_uncore_fw_release_timer(struct hrtimer *timer) > > assert_rpm_device_not_suspended(dev_priv); > > + if (xchg(&domain->active, false)) > + return HRTIMER_RESTART; > + > spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); > if (WARN_ON(domain->wake_count == 0)) > domain->wake_count++; > @@ -246,6 +249,7 @@ static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv, > active_domains = 0; > > for_each_fw_domain(domain, dev_priv, tmp) { > + smp_store_mb(domain->active, false); > if (hrtimer_cancel(&domain->timer) == 0) > continue; > > @@ -453,9 +457,12 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, > > fw_domains &= 
dev_priv->uncore.fw_domains; > > - for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) > - if (domain->wake_count++) > + for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) { > + if (domain->wake_count++) { > fw_domains &= ~domain->mask; > + domain->active = true; > + } > + } > > if (fw_domains) > dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); > @@ -520,8 +527,10 @@ static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, > if (WARN_ON(domain->wake_count == 0)) > continue; > > - if (--domain->wake_count) > + if (--domain->wake_count) { > + domain->active = true; We don't want to set the active here for all domains that are going to power off, and delay their release for one timer tick to optimistically wait for next user? -Mika > continue; > + } > > fw_domain_arm_timer(domain); > } > diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h > index 5fec5fd4346c..dfead585835c 100644 > --- a/drivers/gpu/drm/i915/intel_uncore.h > +++ b/drivers/gpu/drm/i915/intel_uncore.h > @@ -95,6 +95,7 @@ struct intel_uncore { > struct intel_uncore_forcewake_domain { > unsigned int mask; > unsigned int wake_count; > + bool active; > struct hrtimer timer; > i915_reg_t reg_set; > i915_reg_t reg_ack; > -- > 2.11.0 _______________________________________________ Intel-gfx mailing list Intel-gfx@xxxxxxxxxxxxxxxxxxxxx https://lists.freedesktop.org/mailman/listinfo/intel-gfx