padata, pcrypt: take CPU hotplug lock internally in padata_alloc_possible

With pcrypt's cpumask no longer used, take the CPU hotplug lock inside
padata_alloc_possible.

This is useful later in the series for avoiding nested acquisition of the CPU
hotplug lock in padata when padata_alloc_possible allocates an unbound
workqueue.

Without this patch, this nested acquisition would happen later in the
series:

      pcrypt_init_padata
        get_online_cpus
        padata_alloc_possible
          padata_alloc
            alloc_workqueue(WQ_UNBOUND)   // later in the series
              alloc_and_link_pwqs
                apply_wqattrs_lock
                  get_online_cpus         // recursive rwsem acquisition
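
With the hotplug lock taken inside padata_alloc instead, the same path no
longer nests; a sketch of the corresponding flow once the unbound-workqueue
change lands later in the series (derived from the hunks below):

      pcrypt_init_padata
        padata_alloc_possible
          padata_alloc
            alloc_workqueue(WQ_UNBOUND)   // later in the series, before the lock
            get_online_cpus
            ...                           // cpumask setup, cpuslocked notifier registration
            put_online_cpus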

Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Acked-by: Steffen Klassert <steffen.klassert@secunet.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: linux-crypto@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Daniel Jordan 2019-09-05 21:40:26 -04:00 committed by Herbert Xu
parent 63d3578892
commit cc491d8e64
2 changed files with 9 additions and 12 deletions

crypto/pcrypt.c

@@ -308,8 +308,6 @@ static int pcrypt_init_padata(struct padata_instance **pinst, const char *name)
 {
         int ret = -ENOMEM;
 
-        get_online_cpus();
-
         *pinst = padata_alloc_possible(name);
         if (!*pinst)
                 return ret;
@@ -318,8 +316,6 @@ static int pcrypt_init_padata(struct padata_instance **pinst, const char *name)
         if (ret)
                 padata_free(*pinst);
 
-        put_online_cpus();
-
         return ret;
 }
 
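
Applied, the two hunks above leave the caller with no hotplug locking of its
own; the function reduces to the following (unchanged lines between the two
hunks elided):

      static int pcrypt_init_padata(struct padata_instance **pinst, const char *name)
      {
              int ret = -ENOMEM;

              *pinst = padata_alloc_possible(name);
              if (!*pinst)
                      return ret;
              ...
              if (ret)
                      padata_free(*pinst);

              return ret;
      }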

kernel/padata.c

@@ -955,8 +955,6 @@ static struct kobj_type padata_attr_type = {
  * @name: used to identify the instance
  * @pcpumask: cpumask that will be used for padata parallelization
  * @cbcpumask: cpumask that will be used for padata serialization
- *
- * Must be called from a cpus_read_lock() protected region
  */
 static struct padata_instance *padata_alloc(const char *name,
                                             const struct cpumask *pcpumask,
@@ -974,11 +972,13 @@ static struct padata_instance *padata_alloc(const char *name,
         if (!pinst->wq)
                 goto err_free_inst;
 
+        get_online_cpus();
+
         if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
-                goto err_free_wq;
+                goto err_put_cpus;
         if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
                 free_cpumask_var(pinst->cpumask.pcpu);
-                goto err_free_wq;
+                goto err_put_cpus;
         }
         if (!padata_validate_cpumask(pinst, pcpumask) ||
             !padata_validate_cpumask(pinst, cbcpumask))
@@ -1002,12 +1002,16 @@ static struct padata_instance *padata_alloc(const char *name,
 #ifdef CONFIG_HOTPLUG_CPU
         cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node);
 #endif
+
+        put_online_cpus();
+
         return pinst;
 
 err_free_masks:
         free_cpumask_var(pinst->cpumask.pcpu);
         free_cpumask_var(pinst->cpumask.cbcpu);
-err_free_wq:
+err_put_cpus:
+        put_online_cpus();
         destroy_workqueue(pinst->wq);
 err_free_inst:
         kfree(pinst);
@@ -1021,12 +1025,9 @@ err:
  * parallel workers.
  *
  * @name: used to identify the instance
- *
- * Must be called from a cpus_read_lock() protected region
  */
 struct padata_instance *padata_alloc_possible(const char *name)
 {
-        lockdep_assert_cpus_held();
         return padata_alloc(name, cpu_possible_mask, cpu_possible_mask);
 }
 EXPORT_SYMBOL(padata_alloc_possible);
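
Taken together, the padata.c hunks confine the hotplug lock to the cpumask
setup and the cpuslocked hotplug-notifier registration, with error paths
reached while the lock is held dropping it before tearing down the workqueue.
A condensed sketch of the resulting padata_alloc (details elided):

      static struct padata_instance *padata_alloc(const char *name, ...)
      {
              ...
              get_online_cpus();              /* taken after the workqueue is allocated */
              ...                             /* cpumask allocation, validation, instance setup */
              cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node);
              put_online_cpus();

              return pinst;

      err_put_cpus:
              put_online_cpus();              /* error paths inside the locked region */
              destroy_workqueue(pinst->wq);
              ...
      }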