On Thu, Jul 18, 2019 at 11:01:46PM +0800, Herbert Xu wrote:
> @@ -376,9 +325,8 @@ void padata_do_serial(struct padata_priv *padata)
>
>  	cpu = get_cpu();
>
> -	/* We need to run on the same CPU padata_do_parallel(.., padata, ..)
> -	 * was called on -- or, at least, enqueue the padata object into the
> -	 * correct per-cpu queue.
> +	/* We need to enqueue the padata object into the correct
> +	 * per-cpu queue.
>  	 */
>  	if (cpu != padata->cpu) {
>  		reorder_via_wq = 1;
> @@ -388,12 +336,12 @@ void padata_do_serial(struct padata_priv *padata)
>  	pqueue = per_cpu_ptr(pd->pqueue, cpu);
>
>  	spin_lock(&pqueue->reorder.lock);
> -	atomic_inc(&pd->reorder_objects);
>  	list_add_tail(&padata->list, &pqueue->reorder.list);
> +	atomic_inc(&pd->reorder_objects);
>  	spin_unlock(&pqueue->reorder.lock);
>
>  	/*
> -	 * Ensure the atomic_inc of reorder_objects above is ordered correctly
> +	 * Ensure the addition to the reorder list is ordered correctly
>  	 * with the trylock of pd->lock in padata_reorder.  Pairs with smp_mb
>  	 * in padata_reorder.
>  	 */
> @@ -401,13 +349,7 @@ void padata_do_serial(struct padata_priv *padata)
>
>  	put_cpu();
>
> -	/* If we're running on the wrong CPU, call padata_reorder() via a
> -	 * kernel worker.
> -	 */
> -	if (reorder_via_wq)
> -		queue_work_on(cpu, pd->pinst->wq, &pqueue->reorder_work);
> -	else
> -		padata_reorder(pd);
> +	padata_reorder(pd);
>  }
>  EXPORT_SYMBOL(padata_do_serial);

If I'm not missing anything, it still looks like get_cpu() and reorder_via_wq
no longer have any effect with this patch and can be removed.
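
IOW, something along these lines (just an untested sketch against the hunks
quoted above; I'm assuming pd still comes from padata->pd and that the barrier
kept in the elided context is smp_mb__after_atomic()):

void padata_do_serial(struct padata_priv *padata)
{
	struct parallel_data *pd = padata->pd;
	/* Use the hashed CPU recorded at padata_do_parallel() time directly. */
	struct padata_parallel_queue *pqueue = per_cpu_ptr(pd->pqueue,
							   padata->cpu);

	spin_lock(&pqueue->reorder.lock);
	list_add_tail(&padata->list, &pqueue->reorder.list);
	atomic_inc(&pd->reorder_objects);
	spin_unlock(&pqueue->reorder.lock);

	/*
	 * Ensure the addition to the reorder list is ordered correctly
	 * with the trylock of pd->lock in padata_reorder.  Pairs with smp_mb
	 * in padata_reorder.
	 */
	smp_mb__after_atomic();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);

Since the object always lands on the per-cpu reorder queue chosen by
padata->cpu, the current CPU no longer matters here, so get_cpu()/put_cpu()
and the reorder_via_wq branch seem to drop out entirely.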