On Mon, Apr 15, 2024 at 08:08:12PM -0500, Elizabeth Figura wrote:

> +	if (atomic_read(&sem->all_hint) > 0) {
> +		spin_lock(&dev->wait_all_lock);
> +		spin_lock_nest_lock(&sem->lock, &dev->wait_all_lock);
>
> +		prev_count = sem->u.sem.count;
> +		ret = post_sem_state(sem, args);
> +		if (!ret) {
> +			try_wake_all_obj(dev, sem);
> +			try_wake_any_sem(sem);
> +		}
>
> +		spin_unlock(&sem->lock);
> +		spin_unlock(&dev->wait_all_lock);
> +	} else {
> +		spin_lock(&sem->lock);
>
> +		prev_count = sem->u.sem.count;
> +		ret = post_sem_state(sem, args);
> +		if (!ret)
> +			try_wake_any_sem(sem);
>
> +		spin_unlock(&sem->lock);
> +	}
>
> 	if (!ret && put_user(prev_count, user_args))
> 		ret = -EFAULT;

vs.

> +	/* queue ourselves */
>
> +	spin_lock(&dev->wait_all_lock);
>
> +	for (i = 0; i < args.count; i++) {
> +		struct ntsync_q_entry *entry = &q->entries[i];
> +		struct ntsync_obj *obj = entry->obj;
>
> +		atomic_inc(&obj->all_hint);
>
> +		/*
> +		 * obj->all_waiters is protected by dev->wait_all_lock rather
> +		 * than obj->lock, so there is no need to acquire obj->lock
> +		 * here.
> +		 */
> +		list_add_tail(&entry->node, &obj->all_waiters);
> +	}

This looks racy, consider:

	atomic_read(&sem->all_hint) /* 0 */

					spin_lock(&dev->wait_all_lock)
					atomic_inc(&obj->all_hint) /* 1 */
					list_add_tail()

	spin_lock(&sem->lock)
	/* try_wake_all_obj() missing */

That is, the poster reads all_hint as zero and takes the fast path, while a
wait-all waiter queues itself concurrently, so the poster never calls
try_wake_all_obj() for it.

I've not yet thought about whether this is harmful or not, but if it is
not, it definitely needs a comment.

Anyway, I need a break; maybe more this evening.
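One way the window could be closed (a sketch only, not from the patch):
recheck all_hint after taking obj->lock and fall back to the
wait_all_lock path on a race. The helper name obj_lock_for_wake() is
made up; the fields it touches (all_hint, lock, wait_all_lock) are the
ones from the quoted code, and it assumes the waiter rechecks the
object state under obj->lock after queueing itself:

	/*
	 * Take the locks needed to post to @obj and wake its waiters.
	 * Returns true if dev->wait_all_lock was taken as well, in which
	 * case the caller must call try_wake_all_obj() and drop both
	 * locks.
	 */
	static bool obj_lock_for_wake(struct ntsync_device *dev,
				      struct ntsync_obj *obj)
	{
		if (atomic_read(&obj->all_hint) == 0) {
			spin_lock(&obj->lock);
			/*
			 * Recheck under obj->lock: a wait-all waiter that
			 * bumped all_hint after the unlocked read above
			 * either released obj->lock before we took it (so
			 * the increment is visible here), or it takes
			 * obj->lock after us and observes whatever the
			 * caller posts. Either way no wakeup is lost.
			 */
			if (atomic_read(&obj->all_hint) == 0)
				return false;	/* wait-any waiters only */
			spin_unlock(&obj->lock);
		}

		spin_lock(&dev->wait_all_lock);
		spin_lock_nest_lock(&obj->lock, &dev->wait_all_lock);
		return true;
	}

With something like that, the unlocked atomic_read() becomes a pure
fast-path hint: either the recheck sees the increment and the poster
goes through wait_all_lock, or the waiter's own state check under
obj->lock sees the posted count and wakes itself.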