On Tue, Aug 24, 2021 at 11:29:42AM +0000, Yafang Shao wrote:
> diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
> index 3f93fc3b5648..b2542f4d3192 100644
> --- a/kernel/sched/stats.c
> +++ b/kernel/sched/stats.c
> @@ -4,6 +4,109 @@
>  */
>  #include "sched.h"
>
> +void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
> + struct sched_statistics *stats)
> +{
> +u64 wait_start, prev_wait_start;

indent fail...

> +
> + wait_start = rq_clock(rq);
> + prev_wait_start = schedstat_val(stats->wait_start);
> +
> + if (p && likely(wait_start > prev_wait_start))
> + wait_start -= prev_wait_start;
> +
> + __schedstat_set(stats->wait_start, wait_start);
> +}

> diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
> index e6905e369c5d..9ecd81b91f26 100644
> --- a/kernel/sched/stats.h
> +++ b/kernel/sched/stats.h
> @@ -40,6 +42,33 @@ rq_sched_info_dequeue(struct rq *rq, unsigned long long delta)
> #define schedstat_val(var) (var)
> #define schedstat_val_or_zero(var) ((schedstat_enabled()) ? (var) : 0)
>
> +void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
> + struct sched_statistics *stats);
> +
> +void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
> + struct sched_statistics *stats);
> +void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
> + struct sched_statistics *stats);
> +
> +static inline void
> +check_schedstat_required(void)
> +{
> + if (schedstat_enabled())
> + return;
> +
> + /* Force schedstat enabled if a dependent tracepoint is active */
> + if (trace_sched_stat_wait_enabled() ||
> + trace_sched_stat_sleep_enabled() ||
> + trace_sched_stat_iowait_enabled() ||
> + trace_sched_stat_blocked_enabled() ||
> + trace_sched_stat_runtime_enabled()) {
> + printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
> + "stat_blocked and stat_runtime require the "
> + "kernel parameter schedstats=enable or "
> + "kernel.sched_schedstats=1\n");
> + }
> +}

If you're moving this, you might as well reflow it to not have broken indentation.