On Wed, May 05, 2021 at 12:59:43PM +0200, Peter Zijlstra wrote:
> The situation around sched_info is somewhat complicated, it is used by
> sched_stats and delayacct and, indirectly, kvm.
>
> If SCHEDSTATS=Y (but disabled by default) sched_info_on() is
> unconditionally true -- this is the case for all distro kernel configs
> I checked.
>
> If for some reason SCHEDSTATS=N, but TASK_DELAY_ACCT=Y, then
> sched_info_on() can return false when delayacct is disabled,
> presumably because there would be no other users left; except kvm is.
>
> Instead of complicating matters further by accurately accounting
> sched_stat and kvm state, simply unconditionally enable when
> SCHED_INFO=Y, matching the common distro case.
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
>
> @@ -163,13 +158,12 @@ static inline void sched_info_reset_dequ
>   */
>  static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t)
>  {
> -	unsigned long long now = rq_clock(rq), delta = 0;
> +	unsigned long long delta = 0;
>
> -	if (sched_info_on()) {
> -		if (t->sched_info.last_queued)
> -			delta = now - t->sched_info.last_queued;
> +	if (t->sched_info.last_queued) {
> +		delta = rq_clock(rq) - t->sched_info.last_queued;
> +		t->sched_info.last_queued = 0;
>  	}
> -	sched_info_reset_dequeued(t);
>  	t->sched_info.run_delay += delta;
>
>  	rq_sched_info_dequeue(rq, delta);

As delta is !0 iff t->sched_info.last_queued, why not this?

diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 33ffd41935ba..37e33c0eeb7c 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -158,15 +158,14 @@ static inline void psi_sched_switch(struct task_struct *prev,
  */
 static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t)
 {
-	unsigned long long delta = 0;
-
 	if (t->sched_info.last_queued) {
+		unsigned long long delta;
+
 		delta = rq_clock(rq) - t->sched_info.last_queued;
 		t->sched_info.last_queued = 0;
+		t->sched_info.run_delay += delta;
+		rq_sched_info_dequeue(rq, delta);
 	}
-	t->sched_info.run_delay += delta;
-
-	rq_sched_info_dequeue(rq, delta);
 }
 
 /*

> @@ -184,9 +178,10 @@ static void sched_info_arrive(struct rq
>  {
>  	unsigned long long now = rq_clock(rq), delta = 0;
>
> -	if (t->sched_info.last_queued)
> +	if (t->sched_info.last_queued) {
>  		delta = now - t->sched_info.last_queued;
> -	sched_info_reset_dequeued(t);
> +		t->sched_info.last_queued = 0;
> +	}
>  	t->sched_info.run_delay += delta;
>  	t->sched_info.last_arrival = now;
>  	t->sched_info.pcount++;

Similarly

@@ -176,17 +175,18 @@ static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t)
  */
 static void sched_info_arrive(struct rq *rq, struct task_struct *t)
 {
-	unsigned long long now = rq_clock(rq), delta = 0;
+	unsigned long long now = rq_clock(rq);
 
 	if (t->sched_info.last_queued) {
+		unsigned long long delta;
+
 		delta = now - t->sched_info.last_queued;
 		t->sched_info.last_queued = 0;
+		t->sched_info.run_delay += delta;
+		rq_sched_info_arrive(rq, delta);
 	}
-	t->sched_info.run_delay += delta;
 	t->sched_info.last_arrival = now;
 	t->sched_info.pcount++;
-
-	rq_sched_info_arrive(rq, delta);
 }
 
 /*
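
As an aside, a quick standalone sketch of the invariant both hunks rely
on: delta can only be non-zero when last_queued is set, so the task-side
accounting can move inside the branch without changing the accumulated
run_delay. demo_info and fake_clock below are hypothetical userspace
stand-ins, not the kernel's struct sched_info or rq_clock():

/*
 * Build with: gcc -Wall demo.c -o demo
 * demo_info/fake_clock are simplified stand-ins for the kernel types.
 */
#include <assert.h>
#include <stdio.h>

struct demo_info {
	unsigned long long last_queued;
	unsigned long long run_delay;
};

static unsigned long long fake_clock = 1000;

/* Current shape: delta defaults to 0 and is added unconditionally. */
static void dequeue_before(struct demo_info *t)
{
	unsigned long long delta = 0;

	if (t->last_queued) {
		delta = fake_clock - t->last_queued;
		t->last_queued = 0;
	}
	t->run_delay += delta;		/* adds 0 when never queued */
}

/* Suggested shape: the accumulation happens only inside the branch. */
static void dequeue_after(struct demo_info *t)
{
	if (t->last_queued) {
		unsigned long long delta;

		delta = fake_clock - t->last_queued;
		t->last_queued = 0;
		t->run_delay += delta;
	}
}

int main(void)
{
	struct demo_info a = { .last_queued = 400 }, b = a;
	struct demo_info c = { 0 }, d = c;

	dequeue_before(&a);
	dequeue_after(&b);
	assert(a.run_delay == b.run_delay);	/* both 600 */

	dequeue_before(&c);
	dequeue_after(&d);
	assert(c.run_delay == d.run_delay);	/* both 0 */

	printf("queued: %llu, never queued: %llu\n", a.run_delay, c.run_delay);
	return 0;
}

-- 
Mel Gorman
SUSE Labs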