On Mon, 31 Jan 2022 10:04:16 -0800 Suren Baghdasaryan wrote:
> On Sat, Jan 29, 2022 at 4:13 AM Hillf Danton wrote:
> >
> > On Fri, 28 Jan 2022 05:09:53 -0800 Michel Lespinasse wrote:
> > > +
> > > +static LIST_HEAD(destroy_list);
> > > +static DEFINE_SPINLOCK(destroy_list_lock);
> >
> > 	static bool destroyer_running;
> >
> > > +
> > > +static void destroy_list_workfn(struct work_struct *work)
> > > +{
> > > +	struct percpu_rw_semaphore *sem, *sem2;
> > > +	LIST_HEAD(to_destroy);
> > > +
> >
> > again:
> >
> > > +	spin_lock(&destroy_list_lock);
> >
> > 	if (list_empty(&destroy_list)) {
> > 		destroyer_running = false;
> > 		spin_unlock(&destroy_list_lock);
> > 		return;
> > 	}
> > 	destroyer_running = true;
> >
> > > +	list_splice_init(&destroy_list, &to_destroy);
> > > +	spin_unlock(&destroy_list_lock);
> > > +
> > > +	if (list_empty(&to_destroy))
> > > +		return;
> > > +
> > > +	list_for_each_entry_safe(sem, sem2, &to_destroy, destroy_list_entry) {
> >
> > 		list_del(&sem->destroy_list_entry);
> >
> > > +		percpu_free_rwsem(sem);
> > > +		kfree(sem);
> > > +	}
> >
> > 	goto again;
> >
> > > +}
> > > +
> > > +static DECLARE_WORK(destroy_list_work, destroy_list_workfn);
> > > +
> > > +void percpu_rwsem_async_destroy(struct percpu_rw_semaphore *sem)
> > > +{
> > > +	spin_lock(&destroy_list_lock);
> > > +	list_add_tail(&sem->destroy_list_entry, &destroy_list);
> > > +	spin_unlock(&destroy_list_lock);
> > > +	schedule_work(&destroy_list_work);
> >
> > Nits
> >
> > 	spin_lock(&destroy_list_lock);
> > 1/	/* LIFO */
> > 	list_add(&sem->destroy_list_entry, &destroy_list);
> > 2/	/* spawn worker if it is idle */
> > 	if (!destroyer_running)
> > 3/		/* this is not critical work */
> > 		queue_work(system_unbound_wq, &destroy_list_work);
> > 	spin_unlock(&destroy_list_lock);
>
> Thanks for the review! Just to clarify, are you suggesting
> simplifications to the current patch, or do you see a functional issue?

Apart from the nits, which can be safely ignored in the usual case, I
wonder whether the async destroy can be used in contexts that hold a
raw_spin_lock, for example:

	raw_spin_lock_irq(&foo->lock);
	...
	percpu_rwsem_async_destroy(sem);
	...
	raw_spin_unlock_irq(&foo->lock);

Hillf
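
For reference, one way to make percpu_rwsem_async_destroy() safe in such
atomic contexts would be to drop destroy_list_lock in favor of a
lock-free llist: llist_add() is a cmpxchg loop and queue_work() is
callable from atomic context, so both are usable under a raw spinlock,
including on PREEMPT_RT where spin_lock() can sleep. This is only a
sketch against the quoted patch, not anything Michel posted, and it
assumes a destroy_list_node field of type struct llist_node is added to
struct percpu_rw_semaphore (the field name is illustrative):

	#include <linux/llist.h>
	#include <linux/percpu-rwsem.h>
	#include <linux/slab.h>
	#include <linux/workqueue.h>

	/* Lock-free list of semaphores waiting to be freed. */
	static LLIST_HEAD(destroy_llist);

	static void destroy_list_workfn(struct work_struct *work)
	{
		/* Detach the whole pending batch atomically; no lock needed. */
		struct llist_node *batch = llist_del_all(&destroy_llist);
		struct percpu_rw_semaphore *sem, *sem2;

		llist_for_each_entry_safe(sem, sem2, batch, destroy_list_node) {
			percpu_free_rwsem(sem);
			kfree(sem);
		}
	}

	static DECLARE_WORK(destroy_list_work, destroy_list_workfn);

	void percpu_rwsem_async_destroy(struct percpu_rw_semaphore *sem)
	{
		/*
		 * Safe under a raw spinlock: no sleeping lock is taken.
		 * llist_add() returns true only when the list was empty
		 * beforehand, so the work item is queued at most once
		 * per batch.
		 */
		if (llist_add(&sem->destroy_list_node, &destroy_llist))
			queue_work(system_unbound_wq, &destroy_list_work);
	}

As a side effect, the llist_add() return value plays the role of the
destroyer_running flag suggested in nit 2/, so no extra state or
spinlock is needed on the enqueue side.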