The following commit has been merged into the core/rcu branch of tip:

Commit-ID:     8ae0ae6737ad449c8ae21e2bb01d9736f360a933
Gitweb:        https://git.kernel.org/tip/8ae0ae6737ad449c8ae21e2bb01d9736f360a933
Author:        Thomas Gleixner <tglx@xxxxxxxxxxxxx>
AuthorDate:    Sun, 03 May 2020 15:08:52 +02:00
Committer:     Thomas Gleixner <tglx@xxxxxxxxxxxxx>
CommitterDate: Tue, 19 May 2020 15:51:21 +02:00

rcu: Provide rcu_irq_exit_preempt()

Interrupts and exceptions invoke rcu_irq_enter() on entry and need to
invoke rcu_irq_exit() before they either return to the interrupted code
or invoke the scheduler due to preemption.

The general assumption is that RCU idle code has to have preemption
disabled so that a return from interrupt cannot schedule. So the return
from interrupt code invokes rcu_irq_exit() and preempt_schedule_irq().

If there is any imbalance in the rcu_irq/nmi* invocations or RCU idle
code had preemption enabled then this goes unnoticed until the CPU goes
idle or some other RCU check is executed.

Provide rcu_irq_exit_preempt() which can be invoked from the
interrupt/exception return code in case that preemption is enabled. It
invokes rcu_irq_exit() and contains a few sanity checks in case that
CONFIG_PROVE_RCU is enabled to catch such issues directly.

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Reviewed-by: Paul E. McKenney <paulmck@xxxxxxxxxx>
Reviewed-by: Alexandre Chartre <alexandre.chartre@xxxxxxxxxx>
Acked-by: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Link: https://lkml.kernel.org/r/20200505134904.364456424@xxxxxxxxxxxxx
---
 include/linux/rcutiny.h |  1 +
 include/linux/rcutree.h |  1 +
 kernel/rcu/tree.c       | 22 ++++++++++++++++++++++
 3 files changed, 24 insertions(+)

diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 3465ba7..980eb78 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -71,6 +71,7 @@ static inline void rcu_irq_enter(void) { }
 static inline void rcu_irq_exit_irqson(void) { }
 static inline void rcu_irq_enter_irqson(void) { }
 static inline void rcu_irq_exit(void) { }
+static inline void rcu_irq_exit_preempt(void) { }
 static inline void exit_rcu(void) { }
 static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
 {
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index fbc2627..02016e0 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -47,6 +47,7 @@ void rcu_idle_enter(void);
 void rcu_idle_exit(void);
 void rcu_irq_enter(void);
 void rcu_irq_exit(void);
+void rcu_irq_exit_preempt(void);
 void rcu_irq_enter_irqson(void);
 void rcu_irq_exit_irqson(void);
 
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 9454016..62ee012 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -743,6 +743,28 @@ void noinstr rcu_irq_exit(void)
 	rcu_nmi_exit();
 }
 
+/**
+ * rcu_irq_exit_preempt - Inform RCU that current CPU is exiting irq
+ *			  towards in kernel preemption
+ *
+ * Same as rcu_irq_exit() but has a sanity check that scheduling is safe
+ * from RCU point of view. Invoked from return from interrupt before kernel
+ * preemption.
+ */
+void rcu_irq_exit_preempt(void)
+{
+	lockdep_assert_irqs_disabled();
+	rcu_nmi_exit();
+
+	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
+			 "RCU dynticks_nesting counter underflow/zero!");
+	RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
+			 DYNTICK_IRQ_NONIDLE,
+			 "Bad RCU dynticks_nmi_nesting counter\n");
+	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
+			 "RCU in extended quiescent state!");
+}
+
 /*
  * Wrapper for rcu_irq_exit() where interrupts are enabled.
  *
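
For context, and not part of the patch itself: below is a minimal sketch of how
an interrupt/exception return path with preemption enabled is expected to use
the new helper before calling into the scheduler, based on the description
above. The helper name irq_exit_to_kernel_preempt() is made up for
illustration and the include list is an assumption; the real call sites live
in the architecture entry/exit code, not in a helper like this.

#include <linux/rcupdate.h>	/* rcu_irq_exit_preempt() via rcutiny.h/rcutree.h */
#include <linux/preempt.h>	/* preempt_count() */
#include <linux/sched.h>	/* need_resched() */

/* Hypothetical helper, called on return from interrupt to kernel mode. */
static void irq_exit_to_kernel_preempt(void)
{
	/*
	 * Tell RCU the irq is ending. With CONFIG_PROVE_RCU this also
	 * checks that scheduling from here is safe from RCU's point of
	 * view: no rcu_irq/nmi nesting imbalance and the CPU is not in an
	 * extended quiescent state.
	 */
	rcu_irq_exit_preempt();

	/* Preempt the interrupted kernel code if needed and allowed. */
	if (!preempt_count() && need_resched())
		preempt_schedule_irq();
}

The point of the extra RCU_LOCKDEP_WARN() checks is that a nesting imbalance,
or RCU idle code running with preemption enabled, is reported right where
preemption would schedule instead of going unnoticed until the CPU goes idle
or some later RCU check fires.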