On Mon, May 11, 2020 at 12:58:00PM +0200, Peter Zijlstra wrote:
> On Wed, Apr 29, 2020 at 08:31:39PM -0500, Scott Wood wrote:
> > > If you were to do a queue_balance_callback() from somewhere in the
> > > pick_next_task() machinery, then the balance_callback() at the end of
> > > __schedule() would run it, and it'd be gone. How would
> > > rt_mutex_setprio() / __sched_setscheduler() be affected?
> >
> > The rq lock is dropped between queue_balance_callback() and the
> > balance_callback() at the end of __schedule(). What stops
> > setprio/setscheduler on another cpu from doing the callback at that
> > point?
>
> Hurmm.. fair point, and that might explain some issues I had a while
> back. Let me poke a little at that.

How's this?

---
 kernel/sched/core.c  | 117 +++++++++++++++++++++++++++++++-------------------
 kernel/sched/sched.h |   4 ++++
 2 files changed, 77 insertions(+), 44 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index dfb8ab61cbdd..610e9da557ed 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3078,6 +3078,67 @@ static inline void finish_task(struct task_struct *prev)
 #endif
 }
 
+#ifdef CONFIG_SMP
+
+/* rq->lock must be held */
+static void do_balance_callbacks(struct rq *rq, struct callback_head *head)
+{
+	void (*func)(struct rq *rq);
+	struct callback_head *next;
+
+	while (head) {
+		func = (void (*)(struct rq *))head->func;
+		next = head->next;
+		head->next = NULL;
+		head = next;
+
+		func(rq);
+	}
+}
+
+static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
+{
+	struct callback_head *head = rq->balance_callback;
+
+	if (head)
+		rq->balance_callback = NULL;
+
+	return head;
+}
+
+static void __balance_callbacks(struct rq *rq)
+{
+	do_balance_callbacks(rq, splice_balance_callbacks(rq));
+}
+
+static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
+{
+	unsigned long flags;
+
+	if (unlikely(head)) {
+		raw_spin_lock_irqsave(&rq->lock, flags);
+		do_balance_callbacks(rq, head);
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
+	}
+}
+
+#else
+
+static inline void __balance_callbacks(struct rq *rq)
+{
+}
+
+static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
+{
+	return NULL;
+}
+
+static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
+{
+}
+
+#endif
+
 static inline void
 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
 {
@@ -3103,6 +3164,7 @@ static inline void finish_lock_switch(struct rq *rq)
 	 * prev into current:
 	 */
 	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
+	__balance_callbacks(rq);
 	raw_spin_unlock_irq(&rq->lock);
 }
 
@@ -3244,43 +3306,6 @@ static struct rq *finish_task_switch(struct task_struct *prev)
 	return rq;
 }
 
-#ifdef CONFIG_SMP
-
-/* rq->lock is NOT held, but preemption is disabled */
-static void __balance_callback(struct rq *rq)
-{
-	struct callback_head *head, *next;
-	void (*func)(struct rq *rq);
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&rq->lock, flags);
-	head = rq->balance_callback;
-	rq->balance_callback = NULL;
-	while (head) {
-		func = (void (*)(struct rq *))head->func;
-		next = head->next;
-		head->next = NULL;
-		head = next;
-
-		func(rq);
-	}
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
-}
-
-static inline void balance_callback(struct rq *rq)
-{
-	if (unlikely(rq->balance_callback))
-		__balance_callback(rq);
-}
-
-#else
-
-static inline void balance_callback(struct rq *rq)
-{
-}
-
-#endif
-
 /**
  * schedule_tail - first thing a freshly forked thread must call.
  * @prev: the thread we just switched away from.
@@ -3300,7 +3325,6 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
 	 */
 
 	rq = finish_task_switch(prev);
-	balance_callback(rq);
 	preempt_enable();
 
 	if (current->set_child_tid)
@@ -4090,10 +4114,11 @@ static void __sched notrace __schedule(bool preempt)
 		rq = context_switch(rq, prev, next, &rf);
 	} else {
 		rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
-		rq_unlock_irq(rq, &rf);
-	}
 
-	balance_callback(rq);
+		rq_unpin_lock(rq, &rf);
+		__balance_callbacks(rq);
+		raw_spin_unlock_irq(&rq->lock);
+	}
 }
 
 void __noreturn do_task_dead(void)
@@ -4499,9 +4524,11 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
 out_unlock:
 	/* Avoid rq from going away on us: */
 	preempt_disable();
-	__task_rq_unlock(rq, &rf);
 
-	balance_callback(rq);
+	rq_unpin_lock(rq, &rf);
+	__balance_callbacks(rq);
+	raw_spin_unlock(&rq->lock);
+
 	preempt_enable();
 }
 #else
@@ -4775,6 +4802,7 @@ static int __sched_setscheduler(struct task_struct *p,
 	int retval, oldprio, oldpolicy = -1, queued, running;
 	int new_effective_prio, policy = attr->sched_policy;
 	const struct sched_class *prev_class;
+	struct callback_head *head;
 	struct rq_flags rf;
 	int reset_on_fork;
 	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
@@ -5013,6 +5041,7 @@ static int __sched_setscheduler(struct task_struct *p,
 
 	/* Avoid rq from going away on us: */
 	preempt_disable();
+	head = splice_balance_callbacks(rq);
 	task_rq_unlock(rq, p, &rf);
 
 	if (pi) {
@@ -5021,7 +5050,7 @@ static int __sched_setscheduler(struct task_struct *p,
 	}
 
 	/* Run balance callbacks after we've adjusted the PI chain: */
-	balance_callback(rq);
+	balance_callbacks(rq, head);
 	preempt_enable();
 
 	return 0;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d7fc4caf0dfd..3855d354760d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1189,6 +1189,10 @@ static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
 #ifdef CONFIG_SCHED_DEBUG
 	rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
 	rf->clock_update_flags = 0;
+
+#ifdef CONFIG_SMP
+	SCHED_WARN_ON(rq->balance_callback);
+#endif
 #endif
 }
 
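
For anyone following along, the shape of the fix is: splice the callback
list off rq->balance_callback while still holding rq->lock, so that once
the lock is dropped, a concurrent setprio/setscheduler taking the lock
finds the list empty and cannot run callbacks some other context already
owns. A minimal, self-contained userspace sketch of that pattern (pthread
mutex standing in for rq->lock; all names here are made up for
illustration, none of this is kernel code):

#include <pthread.h>
#include <stdio.h>

struct callback {
	struct callback *next;
	void (*func)(void);
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct callback *pending;	/* stands in for rq->balance_callback */

/* Call with 'lock' held: detach and privatize the whole list. */
static struct callback *splice_callbacks(void)
{
	struct callback *head = pending;

	pending = NULL;
	return head;
}

/* Run a privatized list; other threads can no longer reach these nodes. */
static void run_callbacks(struct callback *head)
{
	struct callback *next;

	while (head) {
		next = head->next;
		head->next = NULL;
		head->func();
		head = next;
	}
}

static void cb_func(void)
{
	puts("callback ran exactly once");
}

int main(void)
{
	struct callback cb = { .next = NULL, .func = cb_func };
	struct callback *head;

	pthread_mutex_lock(&lock);
	cb.next = pending;
	pending = &cb;			/* queue, like queue_balance_callback() */
	head = splice_callbacks();	/* detach before dropping the lock */
	pthread_mutex_unlock(&lock);

	/*
	 * A second thread taking 'lock' here finds 'pending' empty; it
	 * can neither run nor re-run what we spliced off, which is the
	 * window the patch closes.
	 */
	run_callbacks(head);
	return 0;
}

(build with e.g. gcc -pthread) Once spliced, the callbacks belong to
exactly one context, no matter how often the lock is dropped and re-taken
in between.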