Patch "sched: Fix stop_one_cpu_nowait() vs hotplug" has been added to the 6.5-stable tree

This is a note to let you know that I've just added the patch titled

    sched: Fix stop_one_cpu_nowait() vs hotplug

to the 6.5-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     sched-fix-stop_one_cpu_nowait-vs-hotplug.patch
and it can be found in the queue-6.5 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.



commit 27dc7dfe10fc77dfcf199c5a36302dd9a9dffbdc
Author: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Date:   Tue Oct 10 20:57:39 2023 +0200

    sched: Fix stop_one_cpu_nowait() vs hotplug
    
    [ Upstream commit f0498d2a54e7966ce23cd7c7ff42c64fa0059b07 ]
    
    Kuyo reported sporadic failures on a sched_setaffinity() vs CPU
    hotplug stress-test -- notably affine_move_task() remains stuck in
    wait_for_completion(), leading to a hung-task detector warning.
    
    Specifically, it was reported that stop_one_cpu_nowait(.fn =
    migration_cpu_stop) returns false -- this stopper is responsible for
    the matching complete().
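
    For context, the pairing at stake looks roughly like this (simplified
    sketch of the kernel/sched/core.c code paths; details elided):

            // waiter side, affine_move_task():
            stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
                                &pending->arg, &pending->stop_work);
            ...
            wait_for_completion(&pending->done);

            // stopper side, migration_cpu_stop() eventually does:
            complete_all(&pending->done);

    If the nowait queueing fails, migration_cpu_stop() never runs and the
    completion is never signalled, so the waiter hangs.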
    
    The race scenario is:
    
            CPU0                                    CPU1
    
                                            // doing _cpu_down()
    
      __set_cpus_allowed_ptr()
        task_rq_lock();
                                            takedown_cpu()
                                              stop_machine_cpuslocked(take_cpu_down..)
    
                                            <PREEMPT: cpu_stopper_thread()
                                              MULTI_STOP_PREPARE
                                              ...
        __set_cpus_allowed_ptr_locked()
          affine_move_task()
            task_rq_unlock();
    
  <PREEMPT: cpu_stopper_thread()
        ack_state()
                                              MULTI_STOP_RUN
                                                take_cpu_down()
                                                  __cpu_disable();
                                                  stop_machine_park();
                                                    stopper->enabled = false;
                                             />
       />
            stop_one_cpu_nowait(.fn = migration_cpu_stop);
              if (stopper->enabled) // false!!!
    
    That is, by doing stop_one_cpu_nowait() after dropping the rq-lock,
    the local stopper thread gets a chance to preempt us, which lets the
    cpu-down of the target CPU complete before the work is queued.
    
    OTOH, since stop_one_cpu_nowait() / cpu_stop_queue_work() needs to
    issue a wakeup, it must not be run under the scheduler locks.
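
    For reference, the queueing side has roughly this shape (simplified
    from kernel/stop_machine.c; details vary between kernel versions):

            static bool cpu_stop_queue_work(unsigned int cpu,
                                            struct cpu_stop_work *work)
            {
                    struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
                    DEFINE_WAKE_Q(wakeq);
                    unsigned long flags;
                    bool enabled;

                    raw_spin_lock_irqsave(&stopper->lock, flags);
                    enabled = stopper->enabled;     /* the check that fails above */
                    if (enabled)
                            __cpu_stop_queue_work(stopper, work, &wakeq);
                    raw_spin_unlock_irqrestore(&stopper->lock, flags);

                    wake_up_q(&wakeq);              /* wakes the stopper thread */

                    return enabled;
            }

    The wake_up_q() ends up in try_to_wake_up(), which takes scheduler
    locks itself -- hence the caller must not already hold them.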
    
    Solve this apparent contradiction by keeping preemption disabled over
    the unlock + queue_stopper combination:
    
            preempt_disable();
            task_rq_unlock(...);
            if (!stop_pending)
              stop_one_cpu_nowait(...)
            preempt_enable();
    
    This respects the lock ordering constraints while still avoiding the
    above race: with preemption disabled, the local stopper thread cannot
    run between the unlock and the queueing, so the stop_machine() state
    machine cannot advance and park the target's stopper in that window.
    That is, if we find the CPU is online under rq-lock, the targeted
    stop_one_cpu_nowait() must succeed.
    
    Apply this pattern to all similar stop_one_cpu_nowait() invocations.
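
    In each hunk below the shape of the change is the same; annotated
    (comments are editorial, not part of the patch):

            preempt_disable();          /* our stopper cannot preempt us */
            task_rq_unlock(rq, p, &rf); /* drop scheduler locks ... */
            stop_one_cpu_nowait(...);   /* ... before issuing the wakeup */
            preempt_enable();           /* stopper may run from here on */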
    
    Fixes: 6d337eab041d ("sched: Fix migrate_disable() vs set_cpus_allowed_ptr()")
    Reported-by: "Kuyo Chang (張建文)" <Kuyo.Chang@xxxxxxxxxxxx>
    Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
    Tested-by: "Kuyo Chang (張建文)" <Kuyo.Chang@xxxxxxxxxxxx>
    Link: https://lkml.kernel.org/r/20231010200442.GA16515@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e8f73ff12126c..149fdb212e20f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2644,9 +2644,11 @@ static int migration_cpu_stop(void *data)
 		 * it.
 		 */
 		WARN_ON_ONCE(!pending->stop_pending);
+		preempt_disable();
 		task_rq_unlock(rq, p, &rf);
 		stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
 				    &pending->arg, &pending->stop_work);
+		preempt_enable();
 		return 0;
 	}
 out:
@@ -2966,12 +2968,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
 			complete = true;
 		}
 
+		preempt_disable();
 		task_rq_unlock(rq, p, rf);
-
 		if (push_task) {
 			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
 					    p, &rq->push_work);
 		}
+		preempt_enable();
 
 		if (complete)
 			complete_all(&pending->done);
@@ -3037,12 +3040,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
 		if (flags & SCA_MIGRATE_ENABLE)
 			p->migration_flags &= ~MDF_PUSH;
 
+		preempt_disable();
 		task_rq_unlock(rq, p, rf);
-
 		if (!stop_pending) {
 			stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
 					    &pending->arg, &pending->stop_work);
 		}
+		preempt_enable();
 
 		if (flags & SCA_MIGRATE_ENABLE)
 			return 0;
@@ -9507,9 +9511,11 @@ static void balance_push(struct rq *rq)
 	 * Temporarily drop rq->lock such that we can wake-up the stop task.
 	 * Both preemption and IRQs are still disabled.
 	 */
+	preempt_disable();
 	raw_spin_rq_unlock(rq);
 	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
 			    this_cpu_ptr(&push_work));
+	preempt_enable();
 	/*
 	 * At this point need_resched() is true and we'll take the loop in
 	 * schedule(). The next pick is obviously going to be the stop task
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 58b542bf28934..d78f2e8769fb4 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2449,9 +2449,11 @@ static void pull_dl_task(struct rq *this_rq)
 		double_unlock_balance(this_rq, src_rq);
 
 		if (push_task) {
+			preempt_disable();
 			raw_spin_rq_unlock(this_rq);
 			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
 					    push_task, &src_rq->push_work);
+			preempt_enable();
 			raw_spin_rq_lock(this_rq);
 		}
 	}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a87ef35d5424a..8e1b54dc2a214 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10986,13 +10986,15 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 				busiest->push_cpu = this_cpu;
 				active_balance = 1;
 			}
-			raw_spin_rq_unlock_irqrestore(busiest, flags);
 
+			preempt_disable();
+			raw_spin_rq_unlock_irqrestore(busiest, flags);
 			if (active_balance) {
 				stop_one_cpu_nowait(cpu_of(busiest),
 					active_load_balance_cpu_stop, busiest,
 					&busiest->active_balance_work);
 			}
+			preempt_enable();
 		}
 	} else {
 		sd->nr_balance_failed = 0;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 185d3d749f6b6..590abc1a013d2 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2109,9 +2109,11 @@ static int push_rt_task(struct rq *rq, bool pull)
 		 */
 		push_task = get_push_task(rq);
 		if (push_task) {
+			preempt_disable();
 			raw_spin_rq_unlock(rq);
 			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
 					    push_task, &rq->push_work);
+			preempt_enable();
 			raw_spin_rq_lock(rq);
 		}
 
@@ -2448,9 +2450,11 @@ static void pull_rt_task(struct rq *this_rq)
 		double_unlock_balance(this_rq, src_rq);
 
 		if (push_task) {
+			preempt_disable();
 			raw_spin_rq_unlock(this_rq);
 			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
 					    push_task, &src_rq->push_work);
+			preempt_enable();
 			raw_spin_rq_lock(this_rq);
 		}
 	}


