Factor out init_cpu_worker_pool() from workqueue_init_early(). This is pure
reorganization in preparation of BH workqueue support.

Signed-off-by: Tejun Heo <tj@xxxxxxxxxx>
---
 kernel/workqueue.c | 32 ++++++++++++++++++--------------
 1 file changed, 18 insertions(+), 14 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3f2081bd05a4..f93554e479c4 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -7135,6 +7135,22 @@ static void __init restrict_unbound_cpumask(const char *name, const struct cpuma
 	cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, mask);
 }
 
+static void __init init_cpu_worker_pool(struct worker_pool *pool, int cpu, int nice)
+{
+	BUG_ON(init_worker_pool(pool));
+	pool->cpu = cpu;
+	cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
+	cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu));
+	pool->attrs->nice = nice;
+	pool->attrs->affn_strict = true;
+	pool->node = cpu_to_node(cpu);
+
+	/* alloc pool ID */
+	mutex_lock(&wq_pool_mutex);
+	BUG_ON(worker_pool_assign_id(pool));
+	mutex_unlock(&wq_pool_mutex);
+}
+
 /**
  * workqueue_init_early - early init for workqueue subsystem
  *
@@ -7195,20 +7211,8 @@ void __init workqueue_init_early(void)
 		struct worker_pool *pool;
 
 		i = 0;
-		for_each_cpu_worker_pool(pool, cpu) {
-			BUG_ON(init_worker_pool(pool));
-			pool->cpu = cpu;
-			cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
-			cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu));
-			pool->attrs->nice = std_nice[i++];
-			pool->attrs->affn_strict = true;
-			pool->node = cpu_to_node(cpu);
-
-			/* alloc pool ID */
-			mutex_lock(&wq_pool_mutex);
-			BUG_ON(worker_pool_assign_id(pool));
-			mutex_unlock(&wq_pool_mutex);
-		}
+		for_each_cpu_worker_pool(pool, cpu)
+			init_cpu_worker_pool(pool, cpu, std_nice[i++]);
 	}
 
 	/* create default unbound and ordered wq attrs */
--
2.43.0
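
Illustrative note (not part of the patch): the point of the factoring is that a
follow-up adding BH workqueue support can reuse the same per-CPU pool setup. A
minimal sketch of such reuse inside workqueue_init_early(), assuming a
for_each_bh_worker_pool() iterator and a POOL_BH flag from the planned BH work
(neither is defined by this patch):

	for_each_possible_cpu(cpu) {
		struct worker_pool *pool;
		int i = 0;

		/* hypothetical: set up per-CPU BH pools just like the regular ones */
		for_each_bh_worker_pool(pool, cpu) {
			init_cpu_worker_pool(pool, cpu, std_nice[i++]);
			pool->flags |= POOL_BH;	/* assumed flag marking BH pools */
		}
	}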