Hello, Greg.

On Wed, Nov 28, 2012 at 12:15:42PM -0800, Greg Thelen wrote:
> @@ -4276,6 +4276,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
> 	DEFINE_WAIT(wait);
> 	struct cgroup_event *event, *tmp;
> 	struct cgroup_subsys *ss;
> +	struct list_head tmp_list;

	LIST_HEAD(tmp_list);

> 	lockdep_assert_held(&d->d_inode->i_mutex);
> 	lockdep_assert_held(&cgroup_mutex);
> @@ -4330,16 +4331,20 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
> 	/*
> 	 * Unregister events and notify userspace.
> 	 * Notify userspace about cgroup removing only after rmdir of cgroup
> -	 * directory to avoid race between userspace and kernelspace
> +	 * directory to avoid race between userspace and kernelspace.  Use
> +	 * a temporary list to avoid a deadlock with cgroup_event_wake().  Since
> +	 * cgroup_event_wake() is called with the wait queue head locked,
> +	 * remove_wait_queue() cannot be called while holding event_list_lock.
> 	 */
> 	spin_lock(&cgrp->event_list_lock);
> -	list_for_each_entry_safe(event, tmp, &cgrp->event_list, list) {
> +	list_replace_init(&cgrp->event_list, &tmp_list);

list_splice_init() would be more conventional, I think.

> +	spin_unlock(&cgrp->event_list_lock);
> +	list_for_each_entry_safe(event, tmp, &tmp_list, list) {
> 		list_del(&event->list);

Maybe convert this to list_del_init() while at it?

> 		remove_wait_queue(event->wqh, &event->wait);
> 		eventfd_signal(event->eventfd, 1);
> 		schedule_work(&event->remove);
> 	}
> -	spin_unlock(&cgrp->event_list_lock);

Thanks.

-- 
tejun

_______________________________________________
Containers mailing list
Containers@xxxxxxxxxxxxxxxxxxxxxxxxxx
https://lists.linuxfoundation.org/mailman/listinfo/containers