Having a stable wchan means the process must be blocked, and it must stay
that way while stack unwinding is performed.

Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Juri Lelli <juri.lelli@xxxxxxxxxx>
Cc: Vincent Guittot <vincent.guittot@xxxxxxxxxx>
Cc: Dietmar Eggemann <dietmar.eggemann@xxxxxxx>
Cc: Steven Rostedt <rostedt@xxxxxxxxxxx>
Cc: Ben Segall <bsegall@xxxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxx>
Cc: Daniel Bristot de Oliveira <bristot@xxxxxxxxxx>
Suggested-by: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Signed-off-by: Kees Cook <keescook@xxxxxxxxxxxx>
---
 include/linux/sched.h |  1 +
 kernel/sched/core.c   | 16 ++++++++++++++++
 2 files changed, 17 insertions(+)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 39039ce8ac4c..0c8185089e20 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2137,6 +2137,7 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 #endif /* CONFIG_SMP */
 
 extern bool sched_task_on_rq(struct task_struct *p);
+extern unsigned long sched_task_get_wchan(struct task_struct *p);
 
 /*
  * In order to reduce various lock holder preemption latencies provide an
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1bba4128a3e6..4a30455e1ff5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1962,6 +1962,22 @@ bool sched_task_on_rq(struct task_struct *p)
 	return task_on_rq_queued(p);
 }
 
+unsigned long sched_task_get_wchan(struct task_struct *p)
+{
+	unsigned int state;
+	unsigned long ip = 0;
+
+	/* Only get wchan if task is blocked and we can keep it that way. */
+	raw_spin_lock_irq(&p->pi_lock);
+	state = READ_ONCE(p->__state);
+	smp_rmb(); /* see try_to_wake_up() */
+	if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
+		ip = get_wchan(p);
+	raw_spin_unlock_irq(&p->pi_lock);
+
+	return ip;
+}
+
 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (!(flags & ENQUEUE_NOCLOCK))
-- 
2.30.2
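
For context, a minimal sketch of how a caller might consume the new helper;
report_wchan() is hypothetical and not part of this patch, but it mirrors the
kind of /proc reporting path the wrapper is intended for. The point is that
the caller no longer calls get_wchan() directly; sched_task_get_wchan()
takes p->pi_lock and verifies the task is blocked before unwinding, so the
returned IP is stable.

	#include <linux/sched.h>
	#include <linux/seq_file.h>

	/* Hypothetical reporting helper, assuming the API added above. */
	static void report_wchan(struct seq_file *m, struct task_struct *p)
	{
		unsigned long wchan = sched_task_get_wchan(p);

		if (wchan)
			seq_printf(m, "%ps", (void *)wchan);	/* e.g. "do_wait" */
		else
			seq_putc(m, '0');	/* running, waking, or no stable wchan */
	}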