On Sat, Jan 29, 2022 at 4:13 AM Hillf Danton <hdanton@xxxxxxxx> wrote: > > On Fri, 28 Jan 2022 05:09:53 -0800 Michel Lespinasse wrote: > > + > > +static LIST_HEAD(destroy_list); > > +static DEFINE_SPINLOCK(destroy_list_lock); > > static bool destroyer_running; > > > + > > +static void destroy_list_workfn(struct work_struct *work) > > +{ > > + struct percpu_rw_semaphore *sem, *sem2; > > + LIST_HEAD(to_destroy); > > + > > again: > > > + spin_lock(&destroy_list_lock); > > if (list_empty(&destroy_list)) { > destroyer_running = false; > spin_unlock(&destroy_list_lock); > return; > } > destroyer_running = true; > > > + list_splice_init(&destroy_list, &to_destroy); > > + spin_unlock(&destroy_list_lock); > > + > > + if (list_empty(&to_destroy)) > > + return; > > + > > + list_for_each_entry_safe(sem, sem2, &to_destroy, destroy_list_entry) { > > list_del(&sem->destroy_list_entry); > > > + percpu_free_rwsem(sem); > > + kfree(sem); > > + } > > goto again; > > +} > > + > > +static DECLARE_WORK(destroy_list_work, destroy_list_workfn); > > + > > +void percpu_rwsem_async_destroy(struct percpu_rw_semaphore *sem) > > +{ > > + spin_lock(&destroy_list_lock); > > + list_add_tail(&sem->destroy_list_entry, &destroy_list); > > + spin_unlock(&destroy_list_lock); > > + schedule_work(&destroy_list_work); > > Nits > spin_lock(&destroy_list_lock); > 1/ /* LIFO */ > list_add(&sem->destroy_list_entry, &destroy_list); > 2/ /* spawn worker if it is idle */ > if (!destroyer_running) > 3/ /* this is not critical work */ > queue_work(system_unbound_wq, &destroy_list_work); > spin_unlock(&destroy_list_lock); Thanks for the review! Just to clarify, are you suggesting simplifications to the current patch or do you see a functional issue? > > +} > > -- > > 2.20.1 >