From: Dirk Brandewie <dirk.j.brandewie@xxxxxxxxx>

Take non-idle time into account when calculating core busy time.
This ensures that intel_pstate will notice a decrease in load.

backport of commit: fcb6a15c2e7e76d493e6f91ea889ab40e1c643a4
Applies to v3.10.30, v3.12.11, v3.13.3

References: https://bugzilla.kernel.org/show_bug.cgi?id=66581
Cc: 3.10+ <stable@xxxxxxxxxxxxxxx> # 3.10+
Signed-off-by: Dirk Brandewie <dirk.j.brandewie@xxxxxxxxx>
---
 drivers/cpufreq/intel_pstate.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index d51f17ed..111b9ac 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -54,6 +54,7 @@ struct sample {
 	int32_t core_pct_busy;
 	u64 aperf;
 	u64 mperf;
+	unsigned long long tsc;
 	int freq;
 };
 
@@ -88,6 +89,7 @@ struct cpudata {
 
 	u64	prev_aperf;
 	u64	prev_mperf;
+	unsigned long long prev_tsc;
 	int	sample_ptr;
 	struct sample samples[SAMPLE_COUNT];
 };
@@ -499,11 +501,17 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
 					struct sample *sample)
 {
 	u64 core_pct;
-	core_pct = div64_u64(int_tofp(sample->aperf * 100),
-			     sample->mperf);
-	sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);
+	u64 c0_pct;
 
-	sample->core_pct_busy = core_pct;
+	core_pct = div64_u64(sample->aperf * 100, sample->mperf);
+
+	c0_pct = div64_u64(sample->mperf * 100, sample->tsc);
+	sample->freq = fp_toint(
+		mul_fp(int_tofp(cpu->pstate.max_pstate),
+		       int_tofp(core_pct * 1000)));
+
+	sample->core_pct_busy = mul_fp(int_tofp(core_pct),
+				div_fp(int_tofp(c0_pct + 1), int_tofp(100)));
 }
 
 static inline void intel_pstate_sample(struct cpudata *cpu)
-- 
1.8.3.1
--
To unsubscribe from this list: send the line "unsubscribe stable" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html
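
As an aside for anyone reviewing the arithmetic in
intel_pstate_calc_busy() above: below is a minimal, stand-alone
user-space sketch of what the new calculation does. It is not the
kernel code itself; the fixed-point helpers mirror the FRAC_BITS 8
macros already defined in drivers/cpufreq/intel_pstate.c, and the
APERF/MPERF/TSC deltas are made-up numbers chosen only to show the
effect (APERF and MPERF count only while the core is in C0, the TSC
always counts, so mperf/tsc approximates the non-idle fraction).

#include <stdio.h>
#include <stdint.h>

/* Mirrors the 8.8 fixed-point helpers in intel_pstate.c. */
#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static int32_t div_fp(int32_t x, int32_t y)
{
	return ((int64_t)x << FRAC_BITS) / y;
}

int main(void)
{
	/* Hypothetical deltas for one sample interval. */
	uint64_t aperf = 24000;  /* cycles actually executed (C0 only) */
	uint64_t mperf = 30000;  /* max-frequency reference (C0 only)  */
	uint64_t tsc = 100000;   /* wall clock; always counting        */

	uint64_t core_pct = aperf * 100 / mperf; /* 80: load while in C0 */
	uint64_t c0_pct = mperf * 100 / tsc;     /* 30: time spent in C0 */

	/* Before the patch, busy was core_pct alone: 80, even though
	 * the core was idle for 70% of the interval. */
	printf("old core_pct_busy: %llu\n", (unsigned long long)core_pct);

	/* After the patch, busy is scaled by C0 residency; the +1
	 * appears intended to keep the scale factor from rounding
	 * down to zero. */
	int32_t busy = mul_fp(int_tofp(core_pct),
			      div_fp(int_tofp(c0_pct + 1), int_tofp(100)));
	printf("new core_pct_busy: %d\n", (int)fp_toint(busy)); /* ~24 */

	return 0;
}

With these numbers the old formula reports 80 while the new one reports
about 24, which is why the governor can now notice a decrease in load
when a core goes mostly idle.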