From: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>

If only a subset of events is enabled we can afford to suspend
the sampling timer when the GPU is idle and so save some cycles
and power.

v2: Rebase and limit timer even more.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_drv.h         |  5 ++++
 drivers/gpu/drm/i915/i915_gem.c         |  1 +
 drivers/gpu/drm/i915/i915_gem_request.c |  1 +
 drivers/gpu/drm/i915/i915_pmu.c         | 51 ++++++++++++++++++++++++++++++---
 4 files changed, 54 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 142826742b86..d5ebc524d30d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2617,6 +2617,7 @@ struct drm_i915_private {
 		struct pmu base;
 		spinlock_t lock;
 		struct hrtimer timer;
+		bool timer_enabled;
 		u64 enable;
 		u64 sample[__I915_NUM_PMU_SAMPLERS];
 	} pmu;
@@ -3833,9 +3834,13 @@ extern void i915_perf_unregister(struct drm_i915_private *dev_priv);
 #ifdef CONFIG_PERF_EVENTS
 extern void i915_pmu_register(struct drm_i915_private *i915);
 extern void i915_pmu_unregister(struct drm_i915_private *i915);
+extern void i915_pmu_gt_idle(struct drm_i915_private *i915);
+extern void i915_pmu_gt_active(struct drm_i915_private *i915);
 #else
 static inline void i915_pmu_register(struct drm_i915_private *i915) {}
 static inline void i915_pmu_unregister(struct drm_i915_private *i915) {}
+static inline void i915_pmu_gt_idle(struct drm_i915_private *i915) {}
+static inline void i915_pmu_gt_active(struct drm_i915_private *i915) {}
 #endif
 
 /* i915_suspend.c */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a60885d6231b..1a2156f43d74 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3256,6 +3256,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
 
 	intel_engines_mark_idle(dev_priv);
 	i915_gem_timelines_mark_idle(dev_priv);
+	i915_pmu_gt_idle(dev_priv);
 
 	GEM_BUG_ON(!dev_priv->gt.awake);
 	dev_priv->gt.awake = false;
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 9eedd33eb524..781f41461a71 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -879,6 +879,7 @@ static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
 	i915_update_gfx_val(dev_priv);
 	if (INTEL_GEN(dev_priv) >= 6)
 		gen6_rps_busy(dev_priv);
+	i915_pmu_gt_active(dev_priv);
 
 	queue_delayed_work(dev_priv->wq,
 			   &dev_priv->gt.retire_work,
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 62c527c12641..0d9c0d07a432 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -59,6 +59,46 @@ static u64 event_enabled_mask(struct perf_event *event)
 	return config_enabled_mask(event->attr.config);
 }
 
+static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
+{
+	u64 enable = i915->pmu.enable;
+
+	enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
+		  config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
+		  ENGINE_SAMPLE_MASK;
+
+	if (!gpu_active)
+		enable &= ~ENGINE_SAMPLE_MASK;
+
+	return enable;
+}
+
+void i915_pmu_gt_idle(struct drm_i915_private *i915)
+{
+	spin_lock_irq(&i915->pmu.lock);
+	/*
+	 * Signal sampling timer to stop if only engine events are enabled and
+	 * GPU went idle.
+	 */
+	i915->pmu.timer_enabled = pmu_needs_timer(i915, false);
+	spin_unlock_irq(&i915->pmu.lock);
+}
+
+void i915_pmu_gt_active(struct drm_i915_private *i915)
+{
+	spin_lock_irq(&i915->pmu.lock);
+	/*
+	 * Re-enable sampling timer when GPU goes active.
+	 */
+	if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
+		hrtimer_start_range_ns(&i915->pmu.timer,
+				       ns_to_ktime(PERIOD), 0,
+				       HRTIMER_MODE_REL_PINNED);
+		i915->pmu.timer_enabled = true;
+	}
+	spin_unlock_irq(&i915->pmu.lock);
+}
+
 static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
 {
 	if (!fw)
@@ -149,7 +189,7 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
 	struct drm_i915_private *i915 =
 		container_of(hrtimer, struct drm_i915_private, pmu.timer);
 
-	if (i915->pmu.enable == 0)
+	if (!READ_ONCE(i915->pmu.timer_enabled))
 		return HRTIMER_NORESTART;
 
 	engines_sample(i915);
@@ -317,12 +357,14 @@ static void i915_pmu_enable(struct perf_event *event)
 
 	spin_lock_irqsave(&i915->pmu.lock, flags);
 
-	if (i915->pmu.enable == 0)
+	i915->pmu.enable |= event_enabled_mask(event);
+
+	if (pmu_needs_timer(i915, true) && !i915->pmu.timer_enabled) {
 		hrtimer_start_range_ns(&i915->pmu.timer,
 				       ns_to_ktime(PERIOD), 0,
 				       HRTIMER_MODE_REL_PINNED);
-
-	i915->pmu.enable |= event_enabled_mask(event);
+		i915->pmu.timer_enabled = true;
+	}
 
 	if (is_engine_event(event)) {
 		struct intel_engine_cs *engine;
@@ -366,6 +408,7 @@ static void i915_pmu_disable(struct perf_event *event)
 	}
 
 	i915->pmu.enable &= ~mask;
+	i915->pmu.timer_enabled &= pmu_needs_timer(i915, true);
 
 	spin_unlock_irqrestore(&i915->pmu.lock, flags);
 
-- 
2.9.4