---
 kernel/sched/idle.c  | 10 ++++++++++
 kernel/sched/sched.h | 13 +++++++++++++
 2 files changed, 23 insertions(+)

diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index d17b0a5ce6ac..cc538acb3f1a 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -258,15 +258,25 @@ static void cpuidle_idle_call(void)
  *
  * Called with polling cleared.
  */
+DEFINE_PER_CPU(u64, last_util_update_time);	/* in jiffies */
 static void do_idle(void)
 {
 	int cpu = smp_processor_id();
+	u64 expire;
 
 	/*
 	 * Check if we need to update blocked load
 	 */
 	nohz_run_idle_balance(cpu);
 
+#ifdef CONFIG_X86_INTEL_PSTATE
+	expire = __this_cpu_read(last_util_update_time) + HZ * 3;
+	if (unlikely(time_is_before_jiffies(expire))) {
+		idle_update_util();
+		__this_cpu_write(last_util_update_time, get_jiffies_64());
+	}
+#endif
+
 	/*
 	 * If the arch has a polling bit, we maintain an invariant:
 	 *
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0e66749486e7..2a8d87988d1f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2809,6 +2809,19 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
 	if (data)
 		data->func(data, rq_clock(rq), flags);
 }
+
+static inline void idle_update_util(void)
+{
+	struct update_util_data *data;
+	struct rq *rq = cpu_rq(raw_smp_processor_id());
+
+	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
+						  cpu_of(rq)));
+	if (data)
+		data->func(data, rq_clock(rq), 0);
+}
+
+
 #else
 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
 #endif /* CONFIG_CPU_FREQ */
-- 
2.25.1
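
A note for readers of the diff: the do_idle() hunk is a simple jiffies-based
rate limiter. A per-CPU timestamp records the last time the cpufreq update
hook ran, and the hook is re-invoked only once more than 3 * HZ ticks
(roughly three seconds) have elapsed. Below is a minimal userspace sketch of
that idiom, not kernel code: HZ, the names, and the plain comparison are
assumptions for illustration (the kernel's time_is_before_jiffies() also
copes with jiffies wraparound, which this sketch does not).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HZ 250			/* assumed tick rate for the sketch */

static uint64_t last_update;	/* stands in for last_util_update_time */

/* Return true at most once every 3 * HZ ticks, mirroring the patch. */
static bool should_update(uint64_t now)
{
	uint64_t expire = last_update + 3 * HZ;

	if (now <= expire)	/* deadline not yet passed */
		return false;
	last_update = now;	/* the patch uses get_jiffies_64() here */
	return true;
}

int main(void)
{
	/* Simulate a tick counter advancing one "jiffy" per iteration. */
	for (uint64_t now = 0; now < 4 * 3 * HZ; now++)
		if (should_update(now))
			printf("update at tick %llu\n",
			       (unsigned long long)now);
	return 0;
}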