This is a note to let you know that I've just added the patch titled

    sched: Split out __schedule() deactivate task logic into a helper

to the 6.12-stable tree which can be found at:

    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     sched-split-out-__schedule-deactivate-task-logic-int.patch
and it can be found in the queue-6.12 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.



commit 6f338f38b1075ce5b946a5ae8ace08613137d522
Author: John Stultz <jstultz@xxxxxxxxxx>
Date:   Wed Oct 9 16:53:39 2024 -0700

    sched: Split out __schedule() deactivate task logic into a helper

    [ Upstream commit 7b3d61f6578ab06f130ecc13cd2f3010a6c295bb ]

    As we're going to re-use the deactivation logic, split it into a helper.

    Signed-off-by: John Stultz <jstultz@xxxxxxxxxx>
    Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
    Reviewed-by: Metin Kaya <metin.kaya@xxxxxxx>
    Reviewed-by: Qais Yousef <qyousef@xxxxxxxxxxx>
    Tested-by: K Prateek Nayak <kprateek.nayak@xxxxxxx>
    Tested-by: Metin Kaya <metin.kaya@xxxxxxx>
    Link: https://lore.kernel.org/r/20241009235352.1614323-7-jstultz@xxxxxxxxxx
    Stable-dep-of: 7d9da040575b ("psi: Fix race when task wakes up before psi_sched_switch() adjusts flags")
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d07dc87787dff..d794d9bb429fd 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6507,6 +6507,45 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 #define SM_PREEMPT		1
 #define SM_RTLOCK_WAIT		2
 
+/*
+ * Helper function for __schedule()
+ *
+ * If a task does not have signals pending, deactivate it
+ * Otherwise marks the task's __state as RUNNING
+ */
+static bool try_to_block_task(struct rq *rq, struct task_struct *p,
+			      unsigned long task_state)
+{
+	int flags = DEQUEUE_NOCLOCK;
+
+	if (signal_pending_state(task_state, p)) {
+		WRITE_ONCE(p->__state, TASK_RUNNING);
+		return false;
+	}
+
+	p->sched_contributes_to_load =
+		(task_state & TASK_UNINTERRUPTIBLE) &&
+		!(task_state & TASK_NOLOAD) &&
+		!(task_state & TASK_FROZEN);
+
+	if (unlikely(is_special_task_state(task_state)))
+		flags |= DEQUEUE_SPECIAL;
+
+	/*
+	 * __schedule()			ttwu()
+	 *   prev_state = prev->state;	  if (p->on_rq && ...)
+	 *   if (prev_state)		    goto out;
+	 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
+	 *				  p->state = TASK_WAKING
+	 *
+	 * Where __schedule() and ttwu() have matching control dependencies.
+	 *
+	 * After this, schedule() must not care about p->state any more.
+	 */
+	block_task(rq, p, flags);
+	return true;
+}
+
 /*
  * __schedule() is the main scheduler function.
  *
@@ -6615,33 +6654,7 @@ static void __sched notrace __schedule(int sched_mode)
 			goto picked;
 		}
 	} else if (!preempt && prev_state) {
-		if (signal_pending_state(prev_state, prev)) {
-			WRITE_ONCE(prev->__state, TASK_RUNNING);
-		} else {
-			int flags = DEQUEUE_NOCLOCK;
-
-			prev->sched_contributes_to_load =
-				(prev_state & TASK_UNINTERRUPTIBLE) &&
-				!(prev_state & TASK_NOLOAD) &&
-				!(prev_state & TASK_FROZEN);
-
-			if (unlikely(is_special_task_state(prev_state)))
-				flags |= DEQUEUE_SPECIAL;
-
-			/*
-			 * __schedule()			ttwu()
-			 *   prev_state = prev->state;	  if (p->on_rq && ...)
-			 *   if (prev_state)		    goto out;
-			 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
-			 *				  p->state = TASK_WAKING
-			 *
-			 * Where __schedule() and ttwu() have matching control dependencies.
-			 *
-			 * After this, schedule() must not care about p->state any more.
-			 */
-			block_task(rq, prev, flags);
-			block = true;
-		}
+		block = try_to_block_task(rq, prev, prev_state);
 		switch_count = &prev->nvcsw;
 	}