Currently cpumask_available() is used to prevent unwanted NOCB
initialization. However, if neither the "rcu_nocbs=" nor the "nohz_full="
parameter is passed, but CONFIG_CPUMASK_OFFSTACK=n, the initialization
path is still taken, running through all sorts of needless operations
and iterations on an empty cpumask.

Fix this by relying on a real initialization state instead. This also
optimizes kthread creation, sparing the iteration over all online CPUs
when NOCB isn't initialized.

Signed-off-by: Frederic Weisbecker <frederic@xxxxxxxxxx>
Cc: Neeraj Upadhyay <quic_neeraju@xxxxxxxxxxx>
Cc: Boqun Feng <boqun.feng@xxxxxxxxx>
Cc: Uladzislau Rezki <urezki@xxxxxxxxx>
Cc: Josh Triplett <josh@xxxxxxxxxxxxxxxx>
Cc: Joel Fernandes <joel@xxxxxxxxxxxxxxxxx>
---
 kernel/rcu/tree_nocb.h | 24 +++++++++++++++++-------
 1 file changed, 17 insertions(+), 7 deletions(-)

diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index e1cb06840454..d8ed3ee47a67 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -60,6 +60,9 @@ static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
  * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
  * If the list is invalid, a warning is emitted and all CPUs are offloaded.
  */
+
+static bool rcu_nocb_is_setup;
+
 static int __init rcu_nocb_setup(char *str)
 {
 	alloc_bootmem_cpumask_var(&rcu_nocb_mask);
@@ -67,6 +70,7 @@ static int __init rcu_nocb_setup(char *str)
 		pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
 		cpumask_setall(rcu_nocb_mask);
 	}
+	rcu_nocb_is_setup = true;
 	return 1;
 }
 __setup("rcu_nocbs=", rcu_nocb_setup);
@@ -1159,13 +1163,17 @@ void __init rcu_init_nohz(void)
 		need_rcu_nocb_mask = true;
 #endif /* #if defined(CONFIG_NO_HZ_FULL) */
 
-	if (!cpumask_available(rcu_nocb_mask) && need_rcu_nocb_mask) {
-		if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
-			pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
-			return;
+	if (need_rcu_nocb_mask) {
+		if (!cpumask_available(rcu_nocb_mask)) {
+			if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
+				pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
+				return;
+			}
 		}
+		rcu_nocb_is_setup = true;
 	}
-	if (!cpumask_available(rcu_nocb_mask))
+
+	if (!rcu_nocb_is_setup)
 		return;
 
 #if defined(CONFIG_NO_HZ_FULL)
@@ -1267,8 +1275,10 @@ static void __init rcu_spawn_nocb_kthreads(void)
 {
 	int cpu;
 
-	for_each_online_cpu(cpu)
-		rcu_spawn_cpu_nocb_kthread(cpu);
+	if (rcu_nocb_is_setup) {
+		for_each_online_cpu(cpu)
+			rcu_spawn_cpu_nocb_kthread(cpu);
+	}
 }
 
 /* How many CB CPU IDs per GP kthread? Default of -1 for sqrt(nr_cpu_ids). */
-- 
2.25.1
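
[Note for reviewers: why cpumask_available() can't double as an
initialization flag. The sketch below is a minimal paraphrase of the
relevant definitions in include/linux/cpumask.h (trimmed, and the
comments are mine; exact details vary across kernel versions). With
CONFIG_CPUMASK_OFFSTACK=n, cpumask_var_t is a plain array, so
cpumask_available() returns true unconditionally, even when no boot
parameter ever populated rcu_nocb_mask -- hence the need for an
explicit rcu_nocb_is_setup state.

	#ifdef CONFIG_CPUMASK_OFFSTACK
	typedef struct cpumask *cpumask_var_t;

	/* Off-stack: the mask exists only once it has been allocated. */
	static inline bool cpumask_available(cpumask_var_t mask)
	{
		return mask != NULL;
	}
	#else
	typedef struct cpumask cpumask_var_t[1];

	/* On-stack: the storage is always there, so always "available". */
	static inline bool cpumask_available(cpumask_var_t mask)
	{
		return true;
	}
	#endif

The new flag tracks "a parameter actually requested NOCB setup" rather
than "the mask storage happens to exist", which is also what lets
rcu_spawn_nocb_kthreads() skip the online-CPU walk entirely.]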