3.12-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Dirk Brandewie <dirk.j.brandewie@xxxxxxxxx>

commit fcb6a15c2e7e76d493e6f91ea889ab40e1c643a4 upstream.

Take non-idle time into account when calculating core busy time.
This ensures that intel_pstate will notice a decrease in load.

References: https://bugzilla.kernel.org/show_bug.cgi?id=66581
Cc: 3.10+ <stable@xxxxxxxxxxxxxxx> # 3.10+
Signed-off-by: Dirk Brandewie <dirk.j.brandewie@xxxxxxxxx>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@xxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
 drivers/cpufreq/intel_pstate.c |   16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -51,6 +51,7 @@ struct sample {
 	int32_t core_pct_busy;
 	u64 aperf;
 	u64 mperf;
+	unsigned long long tsc;
 	int freq;
 };
 
@@ -86,6 +87,7 @@ struct cpudata {
 
 	u64	prev_aperf;
 	u64	prev_mperf;
+	unsigned long long prev_tsc;
 	int	sample_ptr;
 	struct sample samples[SAMPLE_COUNT];
 };
@@ -436,11 +438,17 @@ static inline void intel_pstate_calc_bus
 					struct sample *sample)
 {
 	u64 core_pct;
-	core_pct = div64_u64(int_tofp(sample->aperf * 100),
-			     sample->mperf);
-	sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);
+	u64 c0_pct;
 
-	sample->core_pct_busy = core_pct;
+	core_pct = div64_u64(sample->aperf * 100, sample->mperf);
+
+	c0_pct = div64_u64(sample->mperf * 100, sample->tsc);
+	sample->freq = fp_toint(
+		mul_fp(int_tofp(cpu->pstate.max_pstate),
+			int_tofp(core_pct * 1000)));
+
+	sample->core_pct_busy = mul_fp(int_tofp(core_pct),
+				div_fp(int_tofp(c0_pct + 1), int_tofp(100)));
 }
 
 static inline void intel_pstate_sample(struct cpudata *cpu)

--
To unsubscribe from this list: send the line "unsubscribe stable" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
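
To see why the extra TSC term matters, here is a small user-space sketch of
the busy calculation this patch introduces. It is illustrative only: the
calc_busy() helper and the counter values are made up for demonstration, and
plain integer division stands in for the driver's int_tofp()/mul_fp()/div_fp()
fixed-point helpers.

/*
 * Sketch of the new core-busy math (not kernel code):
 *
 *   core_pct = aperf * 100 / mperf   -- how fast the core ran while active
 *   c0_pct   = mperf * 100 / tsc     -- share of the interval spent non-idle
 *   busy     = core_pct * (c0_pct + 1) / 100
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t calc_busy(uint64_t aperf, uint64_t mperf, uint64_t tsc)
{
	uint64_t core_pct = aperf * 100 / mperf;
	uint64_t c0_pct   = mperf * 100 / tsc;

	return core_pct * (c0_pct + 1) / 100;
}

int main(void)
{
	/* Interval where the core is active ~90% of the time. */
	printf("loaded interval: %llu%%\n",
	       (unsigned long long)calc_busy(9000, 9000, 10000));

	/* Load drops: same aperf/mperf ratio, but only ~10% C0 time. */
	printf("idle interval:   %llu%%\n",
	       (unsigned long long)calc_busy(1000, 1000, 10000));

	return 0;
}

With the old formula, aperf * 100 / mperf alone reports roughly 100% in both
intervals; factoring in mperf/tsc lets the reported busy figure fall from
about 91% to about 11% when the core spends most of the interval idle, which
is the decrease in load the changelog refers to.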