Same logic: 2^32 threads stuck waiting in runqueue implies 2^32+ processes total, which is absurd. Per-runqueue ->nr_iowait member being 32-bit hints that this is the correct change! Signed-off-by: Alexey Dobriyan <adobriyan@xxxxxxxxx> --- drivers/cpuidle/governors/menu.c | 6 +++--- fs/proc/stat.c | 2 +- include/linux/sched/stat.h | 4 ++-- kernel/sched/core.c | 6 +++--- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index b0a7ad566081..ddaaa36af290 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -117,7 +117,7 @@ struct menu_device { int interval_ptr; }; -static inline int which_bucket(u64 duration_ns, unsigned long nr_iowaiters) +static inline int which_bucket(u64 duration_ns, unsigned int nr_iowaiters) { int bucket = 0; @@ -150,7 +150,7 @@ static inline int which_bucket(u64 duration_ns, unsigned long nr_iowaiters) * to be, the higher this multiplier, and thus the higher * the barrier to go to an expensive C state. */ -static inline int performance_multiplier(unsigned long nr_iowaiters) +static inline int performance_multiplier(unsigned int nr_iowaiters) { /* for IO wait tasks (per cpu!) 
we add 10x each */ return 1 + 10 * nr_iowaiters; @@ -270,7 +270,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, unsigned int predicted_us; u64 predicted_ns; u64 interactivity_req; - unsigned long nr_iowaiters; + unsigned int nr_iowaiters; ktime_t delta_next; int i, idx; diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 93ce344f62a5..678feb7b9949 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -198,7 +198,7 @@ static int show_stat(struct seq_file *p, void *v) "btime %llu\n" "processes %lu\n" "procs_running %u\n" - "procs_blocked %lu\n", + "procs_blocked %u\n", nr_context_switches(), (unsigned long long)boottime.tv_sec, total_forks, diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h index f3b86515bafe..c4bd2fc95219 100644 --- a/include/linux/sched/stat.h +++ b/include/linux/sched/stat.h @@ -18,8 +18,8 @@ DECLARE_PER_CPU(unsigned long, process_counts); extern int nr_processes(void); unsigned int nr_running(void); extern bool single_task_running(void); -extern unsigned long nr_iowait(void); -extern unsigned long nr_iowait_cpu(int cpu); +unsigned int nr_iowait(void); +unsigned int nr_iowait_cpu(int cpu); static inline int sched_info_on(void) { diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d9bae602966c..ec98244e9d96 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3428,7 +3428,7 @@ unsigned long long nr_context_switches(void) * it does become runnable. */ -unsigned long nr_iowait_cpu(int cpu) +unsigned int nr_iowait_cpu(int cpu) { return atomic_read(&cpu_rq(cpu)->nr_iowait); } @@ -3463,9 +3463,9 @@ unsigned long nr_iowait_cpu(int cpu) * Task CPU affinities can make all that even more 'interesting'. */ -unsigned long nr_iowait(void) +unsigned int nr_iowait(void) { - unsigned long i, sum = 0; + unsigned int i, sum = 0; for_each_possible_cpu(i) sum += nr_iowait_cpu(i); -- 2.24.1