On Fri, Feb 23, 2024 at 01:17:14PM +0100, Frederic Weisbecker wrote:
> diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
> index 866743e0796f..0ff2b554f5b5 100644
> --- a/kernel/rcu/tasks.h
> +++ b/kernel/rcu/tasks.h
> @@ -973,12 +973,13 @@ static void rcu_tasks_postscan(struct list_head *hop)
>  	for_each_possible_cpu(cpu) {
>  		unsigned long j = jiffies + 1;
>  		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, cpu);
> -		struct task_struct *t;
> -		struct task_struct *t1;
> -		struct list_head tmp;
> 
>  		raw_spin_lock_irq_rcu_node(rtpcp);
> -		list_for_each_entry_safe(t, t1, &rtpcp->rtp_exit_list, rcu_tasks_exit_list) {
> +		while (!list_empty(&rtpcp->rtp_exit_list)) {
> +			struct task_struct *t;
> +			t = list_first_entry(&rtpcp->rtp_exit_list, typeof(*t), rcu_tasks_exit_list);
> +			list_del_init(&t->rcu_tasks_exit_list);

Oh no! The task has to stay in the list for subsequent grace periods!
Please forget that suggestion... Yours looks good! Thanks.

> +
>  			if (list_empty(&t->rcu_tasks_holdout_list))
>  				rcu_tasks_pertask(t, hop);
> 
> @@ -987,14 +988,9 @@ static void rcu_tasks_postscan(struct list_head *hop)
>  			if (!IS_ENABLED(CONFIG_PREEMPT_RT) && time_before(jiffies, j))
>  				continue;
> 
> -			// Keep our place in the list while pausing.
> -			// Nothing else traverses this list, so adding a
> -			// bare list_head is OK.
> -			list_add(&tmp, &t->rcu_tasks_exit_list);
>  			raw_spin_unlock_irq_rcu_node(rtpcp);
>  			cond_resched(); // For CONFIG_PREEMPT=n kernels
>  			raw_spin_lock_irq_rcu_node(rtpcp);
> -			list_del(&tmp);
>  			j = jiffies + 1;
>  		}
>  		raw_spin_unlock_irq_rcu_node(rtpcp);
> @@ -1219,7 +1215,6 @@ void exit_tasks_rcu_stop(void)
>  	struct rcu_tasks_percpu *rtpcp;
>  	struct task_struct *t = current;
> 
> -	WARN_ON_ONCE(list_empty(&t->rcu_tasks_exit_list));
>  	rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, t->rcu_tasks_exit_cpu);
>  	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
>  	list_del_init(&t->rcu_tasks_exit_list);
> 
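
For reference, here is a minimal sketch of the variant that keeps the task
queued, reconstructed by putting the "-" lines of the quoted hunks back in
place of the "+" lines: the exiting task stays on rtp_exit_list across the
cond_resched() pause (so subsequent grace periods can still see it), and the
bare list_head tmp only marks our place in the traversal. Everything below
follows the quoted diff; only the comments are mine.

	for_each_possible_cpu(cpu) {
		unsigned long j = jiffies + 1;
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, cpu);
		struct task_struct *t;
		struct task_struct *t1;
		struct list_head tmp;

		raw_spin_lock_irq_rcu_node(rtpcp);
		list_for_each_entry_safe(t, t1, &rtpcp->rtp_exit_list, rcu_tasks_exit_list) {
			/*
			 * Queue still-exiting tasks as holdouts.  Note that t
			 * is NOT removed from rtp_exit_list here, so later
			 * grace-period scans can still find it.
			 */
			if (list_empty(&t->rcu_tasks_holdout_list))
				rcu_tasks_pertask(t, hop);

			if (!IS_ENABLED(CONFIG_PREEMPT_RT) && time_before(jiffies, j))
				continue;

			/*
			 * Keep our place in the list while pausing.  Nothing
			 * else traverses this list, so a bare list_head is OK.
			 */
			list_add(&tmp, &t->rcu_tasks_exit_list);
			raw_spin_unlock_irq_rcu_node(rtpcp);
			cond_resched();	/* For CONFIG_PREEMPT=n kernels. */
			raw_spin_lock_irq_rcu_node(rtpcp);
			list_del(&tmp);
			j = jiffies + 1;
		}
		raw_spin_unlock_irq_rcu_node(rtpcp);
	}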