Without this, rcu_note_context_switch() will complain if an RCU read
lock is held when migrate_enable() calls stop_one_cpu().  Likewise when
migrate_disable() calls pin_current_cpu(), which calls __read_rt_lock()
-- bypassing the part of the mutex code that calls rt_invol_sleep_inc().

Signed-off-by: Scott Wood <swood@xxxxxxxxxx>
---
v3: Add to pin_current_cpu as well

 include/linux/sched.h    | 4 ++--
 kernel/cpu.c             | 2 ++
 kernel/rcu/tree_plugin.h | 2 +-
 kernel/sched/core.c      | 4 ++++
 4 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index edc93b74f7d8..ecf5cbb23335 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -673,7 +673,7 @@ struct task_struct {
 	int migrate_disable_atomic;
 # endif
 #endif
-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT_BASE
 	/* Task is blocking due to RT-specific mechanisms, not voluntarily */
 	int rt_invol_sleep;
 #endif
@@ -1882,7 +1882,7 @@ static __always_inline bool need_resched(void)
 	return unlikely(tif_need_resched());
 }
 
-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT_BASE
 static inline void rt_invol_sleep_inc(void)
 {
 	current->rt_invol_sleep++;
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 885a195dfbe0..32c6175b63b6 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -308,7 +308,9 @@ void pin_current_cpu(void)
 	preempt_lazy_enable();
 	preempt_enable();
 
+	rt_invol_sleep_inc();
 	__read_rt_lock(cpuhp_pin);
+	rt_invol_sleep_dec();
 
 	preempt_disable();
 	preempt_lazy_disable();
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 0da4b975cd71..6d92dafeeca5 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -292,7 +292,7 @@ void rcu_note_context_switch(bool preempt)
 	barrier(); /* Avoid RCU read-side critical sections leaking down. */
 	trace_rcu_utilization(TPS("Start context switch"));
 	lockdep_assert_irqs_disabled();
-#if defined(CONFIG_PREEMPT_RT_FULL)
+#if defined(CONFIG_PREEMPT_RT_BASE)
 	rt_invol = t->rt_invol_sleep;
 #endif
 	WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0 && !rt_invol);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e1bdd7f9be05..a151332474d8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7405,7 +7405,11 @@ void migrate_enable(void)
 		unpin_current_cpu();
 		preempt_lazy_enable();
 		preempt_enable();
+
+		rt_invol_sleep_inc();
 		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
+		rt_invol_sleep_dec();
+
 		return;
 	}
 }
-- 
1.8.3.1
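
[Editor's note, not part of the patch: the rt_invol_sleep_dec() calls added
above rely on helpers defined elsewhere in this series. Based on the
rt_invol_sleep_inc() body visible in the sched.h hunk, the full helper pair
presumably looks roughly like the sketch below; the dec() body and the
!CONFIG_PREEMPT_RT_BASE stubs are assumptions for illustration only.]

#ifdef CONFIG_PREEMPT_RT_BASE
static inline void rt_invol_sleep_inc(void)
{
	/* Mark current task as blocking due to an RT-internal mechanism. */
	current->rt_invol_sleep++;
}

static inline void rt_invol_sleep_dec(void)
{
	/* Assumed counterpart: clear the marker once the RT sleep is over. */
	current->rt_invol_sleep--;
}
#else
/* Assumed no-op stubs when PREEMPT_RT_BASE is not enabled. */
static inline void rt_invol_sleep_inc(void) { }
static inline void rt_invol_sleep_dec(void) { }
#endif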