
padata: Remove broken queue flushing

commit 07928d9bfc upstream.

The function padata_flush_queues is fundamentally broken because
it cannot force padata users to complete the request that is
underway.  IOW padata has to passively wait for the completion
of any outstanding work.

As it stands, flushing is used in two places.  Its use in padata_stop
is simply unnecessary because nothing depends on the queues being
flushed afterwards.

The other use in padata_replace is more substantial as we depend
on it to free the old pd structure.  This patch instead uses the
pd->refcnt to dynamically free the pd structure once all requests
are complete.
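
To make the new lifetime rule concrete, below is a minimal userspace
sketch of the same reference-counting pattern (the names toy_pd,
toy_submit and toy_complete are illustrative only, not padata API):
the structure starts with one reference held by its owner, every
in-flight request holds another, and whoever drops the last reference
frees the structure, so no explicit flush is needed.

/*
 * Illustrative userspace model of the lifetime rule described above;
 * not kernel code.  All names here are hypothetical.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_pd {
	atomic_int refcnt;	/* plays the role of pd->refcnt */
};

static struct toy_pd *toy_pd_alloc(void)
{
	struct toy_pd *pd = malloc(sizeof(*pd));

	/* Owner's reference, like refcnt being set to 1 in the patch. */
	atomic_init(&pd->refcnt, 1);
	return pd;
}

static void toy_pd_free(struct toy_pd *pd)
{
	printf("freeing pd\n");
	free(pd);
}

/* Drop one reference; free the structure when the last one goes away. */
static void toy_pd_put(struct toy_pd *pd)
{
	if (atomic_fetch_sub(&pd->refcnt, 1) == 1)
		toy_pd_free(pd);
}

/* A request takes a reference while it is in flight. */
static void toy_submit(struct toy_pd *pd)
{
	atomic_fetch_add(&pd->refcnt, 1);
}

/* Completion of the request (serial callback done) drops it again. */
static void toy_complete(struct toy_pd *pd)
{
	toy_pd_put(pd);
}

int main(void)
{
	struct toy_pd *pd = toy_pd_alloc();

	toy_submit(pd);		/* request outstanding */
	toy_pd_put(pd);		/* owner lets go: no free yet */
	toy_complete(pd);	/* last reference gone: pd freed here */
	return 0;
}

In the patch itself the owner's reference is the one set in
padata_alloc_pd() and dropped in padata_replace(), while the serial
worker batches the per-request drops with atomic_sub_and_test().
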

Fixes: 2b73b07ab8 ("padata: Flush the padata queues actively")
Cc: <stable@vger.kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
5.4-rM2-2.2.x-imx-squashed
Herbert Xu 2019-11-19 13:17:31 +08:00 committed by Greg Kroah-Hartman
parent 5f63963669
commit 5fefc9b3e3
1 changed file with 12 additions and 31 deletions

--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -35,6 +35,8 @@
 
 #define MAX_OBJ_NUM 1000
 
+static void padata_free_pd(struct parallel_data *pd);
+
 static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
 {
 	int cpu, target_cpu;
@@ -283,6 +285,7 @@ static void padata_serial_worker(struct work_struct *serial_work)
 	struct padata_serial_queue *squeue;
 	struct parallel_data *pd;
 	LIST_HEAD(local_list);
+	int cnt;
 
 	local_bh_disable();
 	squeue = container_of(serial_work, struct padata_serial_queue, work);
@@ -292,6 +295,8 @@ static void padata_serial_worker(struct work_struct *serial_work)
 	list_replace_init(&squeue->serial.list, &local_list);
 	spin_unlock(&squeue->serial.lock);
 
+	cnt = 0;
+
 	while (!list_empty(&local_list)) {
 		struct padata_priv *padata;
 
@@ -301,9 +306,12 @@ static void padata_serial_worker(struct work_struct *serial_work)
 		list_del_init(&padata->list);
 
 		padata->serial(padata);
-		atomic_dec(&pd->refcnt);
+		cnt++;
 	}
 	local_bh_enable();
+
+	if (atomic_sub_and_test(cnt, &pd->refcnt))
+		padata_free_pd(pd);
 }
 
 /**
@@ -440,7 +448,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
 	padata_init_squeues(pd);
 	atomic_set(&pd->seq_nr, -1);
 	atomic_set(&pd->reorder_objects, 0);
-	atomic_set(&pd->refcnt, 0);
+	atomic_set(&pd->refcnt, 1);
 	spin_lock_init(&pd->lock);
 	pd->cpu = cpumask_first(pd->cpumask.pcpu);
 	INIT_WORK(&pd->reorder_work, invoke_padata_reorder);
@@ -466,29 +474,6 @@ static void padata_free_pd(struct parallel_data *pd)
 	kfree(pd);
 }
 
-/* Flush all objects out of the padata queues. */
-static void padata_flush_queues(struct parallel_data *pd)
-{
-	int cpu;
-	struct padata_parallel_queue *pqueue;
-	struct padata_serial_queue *squeue;
-
-	for_each_cpu(cpu, pd->cpumask.pcpu) {
-		pqueue = per_cpu_ptr(pd->pqueue, cpu);
-		flush_work(&pqueue->work);
-	}
-
-	if (atomic_read(&pd->reorder_objects))
-		padata_reorder(pd);
-
-	for_each_cpu(cpu, pd->cpumask.cbcpu) {
-		squeue = per_cpu_ptr(pd->squeue, cpu);
-		flush_work(&squeue->work);
-	}
-
-	BUG_ON(atomic_read(&pd->refcnt) != 0);
-}
-
 static void __padata_start(struct padata_instance *pinst)
 {
 	pinst->flags |= PADATA_INIT;
@@ -502,10 +487,6 @@ static void __padata_stop(struct padata_instance *pinst)
 	pinst->flags &= ~PADATA_INIT;
 
 	synchronize_rcu();
-
-	get_online_cpus();
-	padata_flush_queues(pinst->pd);
-	put_online_cpus();
 }
 
 /* Replace the internal control structure with a new one. */
@@ -526,8 +507,8 @@ static void padata_replace(struct padata_instance *pinst,
 	if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
 		notification_mask |= PADATA_CPU_SERIAL;
 
-	padata_flush_queues(pd_old);
-	padata_free_pd(pd_old);
+	if (atomic_dec_and_test(&pd_old->refcnt))
+		padata_free_pd(pd_old);
 
 	if (notification_mask)
 		blocking_notifier_call_chain(&pinst->cpumask_change_notifier,