[upstream commit 48f8070f5dd8e13148ae4647780a452d53c457a2]

This is a backport of upstream commit 48f8070f5dd8 to the 5.15 kernel.
On 5.15 it fixes a recurring oops that shows up alongside RCU-detected
stalls, as in the log below.

log:

root@ls1021atwr:~# uname -r
5.15.93-rt58+ge0f69a158d5b

oops dump stack:

** ID_531 main/smp_fsm.c:1884 <in
rcu: INFO: rcu_preempt detected stalls on CPUs/tasks:                  <<< [1]
rcu:    Tasks blocked on level-0 rcu_node (CPUs 0-1): P116/2:b..l
        (detected by 1, t=2102 jiffies, g=12741, q=1154)
task:irq/31-arm-irq1 state:D stack:    0 pid:  116 ppid:     2 flags:0x00000000
[<8064b97f>] (__schedule) from [<8064bb01>] (schedule+0x8d/0xc2)
[<8064bb01>] (schedule) from [<8064fa65>] (schedule_timeout+0x6d/0xa0)
[<8064fa65>] (schedule_timeout) from [<804ba353>] (fsl_ifc_run_command+0x6f/0x178)
[<804ba353>] (fsl_ifc_run_command) from [<804ba72f>] (fsl_ifc_cmdfunc+0x203/0x2b8)
[<804ba72f>] (fsl_ifc_cmdfunc) from [<804b135f>] ....
< snipped >
rcu: rcu_preempt kthread timer wakeup didn't happen for 764 jiffies! g12741 f0x0 RCU_GP_WAIT_FQS(5) ->state=0x1000
rcu:    Possible timer handling issue on cpu=0 timer-softirq=1095
rcu: rcu_preempt kthread starved for 765 jiffies! g12741 f0x0 RCU_GP_WAIT_FQS(5) ->state=0x1000 ->cpu=0   <<< [2]
rcu:    Unless rcu_preempt kthread gets sufficient CPU time, OOM is now expected behavior.
rcu: RCU grace-period kthread stack dump:
task:rcu_preempt     state:D stack:    0 pid:   13 ppid:     2 flags:0x00000000
[<8064b97f>] (__schedule) from [<8064ba03>] (schedule_rtlock+0x1b/0x2e)
[<8064ba03>] (schedule_rtlock) from [<8064ea6f>] (rtlock_slowlock_locked+0x93/0x108)
[<8064ea6f>] (rtlock_slowlock_locked) from [<8064eb1b>]
[<8064eb1b>] (rt_spin_lock) from [<8021b723>] (__local_bh_disable_ip+0x6b/0x110)
[<8021b723>] (__local_bh_disable_ip) from [<8025a90f>] (del_timer_sync+0x7f/0xe0)
[<8025a90f>] (del_timer_sync) from [<8064fa6b>] (schedule_timeout+0x73/0xa0)
Exception stack(0x820fffb0 to 0x820ffff8)
rcu: Stack dump where RCU GP kthread last ran:
...
Sending NMI from CPU 1 to CPUs 0:
NMI backtrace for cpu 0
< .. >

upstream commit: 48f8070f5dd8e13148ae4647780a452d53c457a2

Signed-off-by: Patrick Wang <patrick.wang.shcn@xxxxxxxxx>
Acked-by: Steven Rostedt (Google) <rostedt@xxxxxxxxxxx>
Signed-off-by: Paul E. McKenney <paulmck@xxxxxxxxxx>
Reviewed-by: Neeraj Upadhyay <quic_neeraju@xxxxxxxxxxx>
Signed-off-by: Ronald Monthero <debug.penguin32@xxxxxxxxx>
---
 kernel/rcu/tree_plugin.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index d070059163d7..36ca6bacd430 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -458,7 +458,7 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
  * be quite short, for example, in the case of the call from
  * rcu_read_unlock_special().
  */
-static void
+static notrace void
 rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
 {
 	bool empty_exp;
@@ -578,7 +578,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
  * is disabled. This function cannot be expected to understand these
  * nuances, so the caller must handle them.
  */
-static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
+static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t)
 {
 	return (__this_cpu_read(rcu_data.exp_deferred_qs) ||
 		READ_ONCE(t->rcu_read_unlock_special.s)) &&
@@ -592,7 +592,7 @@ static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
  * evaluate safety in terms of interrupt, softirq, and preemption
  * disabling.
  */
-static void rcu_preempt_deferred_qs(struct task_struct *t)
+static notrace void rcu_preempt_deferred_qs(struct task_struct *t)
 {
 	unsigned long flags;
 
@@ -922,7 +922,7 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
  * Because there is no preemptible RCU, there can be no deferred quiescent
  * states.
  */
-static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
+static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t)
 {
 	return false;
 }
-- 
2.34.1
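
A short background note on the annotation, since the diff itself is the whole
change: notrace is the kernel's marker for "do not emit function-tracing
instrumentation for this function", so ftrace never gets an entry hook into an
annotated function. Its fallback definition in include/linux/compiler_types.h
is __attribute__((__no_instrument_function__)); the exact attribute selected
depends on the compiler and config. The sketch below shows the same attribute
doing the same job in user space with gcc's -finstrument-functions. It is an
illustration only: the file and helper names are made up, and the kernel's
function tracer uses -pg/fentry style instrumentation rather than
-finstrument-functions, but the attribute-based opt-out is the same idea.

/*
 * Illustrative user-space sketch only, not kernel code.  Build with:
 *   gcc -finstrument-functions notrace_demo.c -o notrace_demo
 * The "notrace" macro mirrors the kernel's fallback definition; the
 * helper names below are made up for this demo.
 */
#include <stdio.h>

#define notrace __attribute__((__no_instrument_function__))

/* Hooks the compiler calls on entry/exit of every instrumented function.
 * They must be notrace themselves, or they would recurse endlessly. */
notrace void __cyg_profile_func_enter(void *fn, void *call_site)
{
	fprintf(stderr, "enter %p\n", fn);
}

notrace void __cyg_profile_func_exit(void *fn, void *call_site)
{
	fprintf(stderr, "exit  %p\n", fn);
}

static int traced_helper(int x)			/* gets enter/exit hooks */
{
	return x + 1;
}

static notrace int untraced_helper(int x)	/* no hooks emitted here */
{
	return x + 2;
}

int main(void)
{
	printf("%d %d\n", traced_helper(1), untraced_helper(1));
	return 0;
}

With the patch applied, the function tracer likewise emits no hooks for
rcu_preempt_deferred_qs(), rcu_preempt_need_deferred_qs() and
rcu_preempt_deferred_qs_irqrestore(), which is the entire effect of the four
one-line changes above.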