On Wed, Dec 09, 2020 at 09:27:31PM +0100, Uladzislau Rezki (Sony) wrote: > Initialize the RCU-tasks earlier, before *_initcall() callbacks are > invoked. Do it after the workqueue subsystem is up and running. That > gives us a possibility to make use of synchronize_rcu_tasks*() wait > API in early_initcall() callbacks. > > Fixes: 36dadef23fcc ("kprobes: Init kprobes in early_initcall") > Signed-off-by: Uladzislau Rezki (Sony) <urezki@xxxxxxxxx> Thank you! I have queued both with the usual editing, so please check and let me know if I messed something up. Thanx, Paul > --- > include/linux/rcupdate.h | 6 ++++++ > init/main.c | 1 + > kernel/rcu/tasks.h | 26 ++++++++++++++++++++++---- > 3 files changed, 29 insertions(+), 4 deletions(-) > > diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h > index 3c3efa4d6ab5..340c7d5344a4 100644 > --- a/include/linux/rcupdate.h > +++ b/include/linux/rcupdate.h > @@ -88,6 +88,12 @@ void rcu_sched_clock_irq(int user); > void rcu_report_dead(unsigned int cpu); > void rcutree_migrate_callbacks(int cpu); > > +#ifdef CONFIG_TASKS_RCU_GENERIC > +void rcu_init_tasks_generic(void); > +#else > +static inline void rcu_init_tasks_generic(void) { } > +#endif > + > #ifdef CONFIG_RCU_STALL_COMMON > void rcu_sysrq_start(void); > void rcu_sysrq_end(void); > diff --git a/init/main.c b/init/main.c > index 130376ec10ba..e253e87bdf58 100644 > --- a/init/main.c > +++ b/init/main.c > @@ -1502,6 +1502,7 @@ static noinline void __init kernel_init_freeable(void) > > init_mm_internals(); > > + rcu_init_tasks_generic(); > do_pre_smp_initcalls(); > lockup_detector_init(); > > diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h > index 35bdcfd84d42..67a162949763 100644 > --- a/kernel/rcu/tasks.h > +++ b/kernel/rcu/tasks.h > @@ -241,7 +241,7 @@ static int __noreturn rcu_tasks_kthread(void *arg) > } > } > > -/* Spawn RCU-tasks grace-period kthread, e.g., at core_initcall() time. */ > +/* Spawn RCU-tasks grace-period kthread. 
*/ > static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp) > { > struct task_struct *t; > @@ -564,7 +564,6 @@ static int __init rcu_spawn_tasks_kthread(void) > rcu_spawn_tasks_kthread_generic(&rcu_tasks); > return 0; > } > -core_initcall(rcu_spawn_tasks_kthread); > > #if !defined(CONFIG_TINY_RCU) > void show_rcu_tasks_classic_gp_kthread(void) > @@ -692,7 +691,6 @@ static int __init rcu_spawn_tasks_rude_kthread(void) > rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude); > return 0; > } > -core_initcall(rcu_spawn_tasks_rude_kthread); > > #if !defined(CONFIG_TINY_RCU) > void show_rcu_tasks_rude_gp_kthread(void) > @@ -968,6 +966,12 @@ static void rcu_tasks_trace_pregp_step(void) > static void rcu_tasks_trace_pertask(struct task_struct *t, > struct list_head *hop) > { > + // During early boot when there is only one boot-CPU, > + // an idle_task is not set for other CPUs. In this case > + // just revert. > + if (unlikely(t == NULL)) > + return; > + > WRITE_ONCE(t->trc_reader_special.b.need_qs, false); > WRITE_ONCE(t->trc_reader_checked, false); > t->trc_ipi_to_cpu = -1; > @@ -1193,7 +1197,6 @@ static int __init rcu_spawn_tasks_trace_kthread(void) > rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace); > return 0; > } > -core_initcall(rcu_spawn_tasks_trace_kthread); > > #if !defined(CONFIG_TINY_RCU) > void show_rcu_tasks_trace_gp_kthread(void) > @@ -1222,6 +1225,21 @@ void show_rcu_tasks_gp_kthreads(void) > } > #endif /* #ifndef CONFIG_TINY_RCU */ > > +void __init rcu_init_tasks_generic(void) > +{ > +#ifdef CONFIG_TASKS_RCU > + rcu_spawn_tasks_kthread(); > +#endif > + > +#ifdef CONFIG_TASKS_RUDE_RCU > + rcu_spawn_tasks_rude_kthread(); > +#endif > + > +#ifdef CONFIG_TASKS_TRACE_RCU > + rcu_spawn_tasks_trace_kthread(); > +#endif > +} > + > #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ > static inline void rcu_tasks_bootup_oddness(void) {} > void show_rcu_tasks_gp_kthreads(void) {} > -- > 2.20.1 >