With pcrypt's cpumask no longer used, take the CPU hotplug lock inside
padata_alloc_possible.

This is useful later in the series to avoid nested acquisition of the
CPU hotplug lock in padata when padata_alloc_possible allocates an
unbound workqueue. Without this patch, that nested acquisition would
happen later in the series:

  pcrypt_init_padata
    get_online_cpus
    padata_alloc_possible
      padata_alloc
        alloc_workqueue(WQ_UNBOUND)   // later in the series
          alloc_and_link_pwqs
            apply_wqattrs_lock
              get_online_cpus   // recursive rwsem acquisition

Signed-off-by: Daniel Jordan <daniel.m.jordan@xxxxxxxxxx>
Acked-by: Steffen Klassert <steffen.klassert@xxxxxxxxxxx>
Cc: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>
Cc: Lai Jiangshan <jiangshanlai@xxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Tejun Heo <tj@xxxxxxxxxx>
Cc: linux-crypto@xxxxxxxxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
---
 crypto/pcrypt.c |  4 ----
 kernel/padata.c | 17 +++++++++--------
 2 files changed, 9 insertions(+), 12 deletions(-)

diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 2ec36e6a132f..543792e0ebf0 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -308,8 +308,6 @@ static int pcrypt_init_padata(struct padata_instance **pinst, const char *name)
 {
 	int ret = -ENOMEM;
 
-	get_online_cpus();
-
 	*pinst = padata_alloc_possible(name);
 	if (!*pinst)
 		return ret;
@@ -318,8 +316,6 @@ static int pcrypt_init_padata(struct padata_instance **pinst, const char *name)
 	if (ret)
 		padata_free(*pinst);
 
-	put_online_cpus();
-
 	return ret;
 }
 
diff --git a/kernel/padata.c b/kernel/padata.c
index 863053a1e379..29d44edd6733 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -907,8 +907,6 @@ static struct kobj_type padata_attr_type = {
  * @name: used to identify the instance
  * @pcpumask: cpumask that will be used for padata parallelization
  * @cbcpumask: cpumask that will be used for padata serialization
- *
- * Must be called from a cpus_read_lock() protected region
  */
 static struct padata_instance *padata_alloc(const char *name,
 					    const struct cpumask *pcpumask,
@@ -926,11 +924,13 @@ static struct padata_instance *padata_alloc(const char *name,
 	if (!pinst->wq)
 		goto err_free_inst;
 
+	get_online_cpus();
+
 	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
-		goto err_free_wq;
+		goto err_put_cpus;
 	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
 		free_cpumask_var(pinst->cpumask.pcpu);
-		goto err_free_wq;
+		goto err_put_cpus;
 	}
 	if (!padata_validate_cpumask(pinst, pcpumask) ||
 	    !padata_validate_cpumask(pinst, cbcpumask))
@@ -956,12 +956,16 @@ static struct padata_instance *padata_alloc(const char *name,
 	cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
 						    &pinst->node);
 #endif
+
+	put_online_cpus();
+
 	return pinst;
 
 err_free_masks:
 	free_cpumask_var(pinst->cpumask.pcpu);
 	free_cpumask_var(pinst->cpumask.cbcpu);
-err_free_wq:
+err_put_cpus:
+	put_online_cpus();
 	destroy_workqueue(pinst->wq);
 err_free_inst:
 	kfree(pinst);
@@ -975,12 +979,9 @@ static struct padata_instance *padata_alloc(const char *name,
  * parallel workers.
  *
  * @name: used to identify the instance
- *
- * Must be called from a cpus_read_lock() protected region
  */
 struct padata_instance *padata_alloc_possible(const char *name)
 {
-	lockdep_assert_cpus_held();
 	return padata_alloc(name, cpu_possible_mask, cpu_possible_mask);
 }
 EXPORT_SYMBOL(padata_alloc_possible);
-- 
2.23.0