From: Frederic Weisbecker <frederic@xxxxxxxxxx>

The rcu_spawn_one_nocb_kthread() function is called only from
rcu_spawn_cpu_nocb_kthread(). Therefore, inline the former into
the latter, saving a few lines of code.

Signed-off-by: Frederic Weisbecker <frederic@xxxxxxxxxx>
Cc: Neeraj Upadhyay <quic_neeraju@xxxxxxxxxxx>
Cc: Boqun Feng <boqun.feng@xxxxxxxxx>
Cc: Uladzislau Rezki <urezki@xxxxxxxxx>
Cc: Josh Triplett <josh@xxxxxxxxxxxxxxxx>
Cc: Joel Fernandes <joel@xxxxxxxxxxxxxxxxx>
Tested-by: Juri Lelli <juri.lelli@xxxxxxxxxx>
Signed-off-by: Paul E. McKenney <paulmck@xxxxxxxxxx>
---
 kernel/rcu/tree_nocb.h | 15 ++++-----------
 1 file changed, 4 insertions(+), 11 deletions(-)

diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index 0c1802ce4764c..eeafb546a7a09 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -1233,12 +1233,15 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
  * rcuo CB kthread, spawn it. Additionally, if the rcuo GP kthread
  * for this CPU's group has not yet been created, spawn it as well.
  */
-static void rcu_spawn_one_nocb_kthread(int cpu)
+static void rcu_spawn_cpu_nocb_kthread(int cpu)
 {
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 	struct rcu_data *rdp_gp;
 	struct task_struct *t;
 
+	if (!rcu_scheduler_fully_active || !rcu_nocb_is_setup)
+		return;
+
 	/* If there already is an rcuo kthread, then nothing to do. */
 	if (rdp->nocb_cb_kthread)
 		return;
@@ -1262,16 +1265,6 @@ static void rcu_spawn_one_nocb_kthread(int cpu)
 	WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
 }
 
-/*
- * If the specified CPU is a no-CBs CPU that does not already have its
- * rcuo kthread, spawn it.
- */
-static void rcu_spawn_cpu_nocb_kthread(int cpu)
-{
-	if (rcu_scheduler_fully_active && rcu_nocb_is_setup)
-		rcu_spawn_one_nocb_kthread(cpu);
-}
-
 /*
  * Once the scheduler is running, spawn rcuo kthreads for all online
  * no-CBs CPUs. This assumes that the early_initcall()s happen before
-- 
2.31.1.189.g2e36527f23
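
For readers composing the two hunks mentally, here is a sketch of how the
merged rcu_spawn_cpu_nocb_kthread() reads after this patch, reconstructed
only from the context visible above; the unchanged middle of the function
body is elided and is not shown in the diff:

static void rcu_spawn_cpu_nocb_kthread(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	struct rcu_data *rdp_gp;
	struct task_struct *t;

	/* Guard hoisted from the removed rcu_spawn_cpu_nocb_kthread() wrapper,
	 * inverted into an early return. */
	if (!rcu_scheduler_fully_active || !rcu_nocb_is_setup)
		return;

	/* If there already is an rcuo kthread, then nothing to do. */
	if (rdp->nocb_cb_kthread)
		return;

	/* ... unchanged body elided (not shown in the hunks above) ... */

	WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
}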