On Mon, May 11, 2020 at 9:03 PM Fabian Frederick <fabf@xxxxxxxxx> wrote:
>
> revert condition and remove clear label
>
> Signed-off-by: Fabian Frederick <fabf@xxxxxxxxx>

Definite NACK on this one.
It creates code churn, increases code nesting level
and brings very little value.

Keep up the good work, Fabian, and try to focus on useful cleanups!

Thanks,
Amir.

> ---
>  fs/notify/mark.c | 37 ++++++++++++++++++-------------------
>  1 file changed, 18 insertions(+), 19 deletions(-)
>
> diff --git a/fs/notify/mark.c b/fs/notify/mark.c
> index 1d96216dffd1..ca2eba786bb6 100644
> --- a/fs/notify/mark.c
> +++ b/fs/notify/mark.c
> @@ -724,28 +724,27 @@ void fsnotify_clear_marks_by_group(struct fsnotify_group *group,
>  	LIST_HEAD(to_free);
>  	struct list_head *head = &to_free;
>
> -	/* Skip selection step if we want to clear all marks. */
> -	if (type_mask == FSNOTIFY_OBJ_ALL_TYPES_MASK) {
> +	if (type_mask != FSNOTIFY_OBJ_ALL_TYPES_MASK) {
> +		/*
> +		 * We have to be really careful here. Anytime we drop mark_mutex,
> +		 * e.g. fsnotify_clear_marks_by_inode() can come and free marks.
> +		 * Even in our to_free list so we have to use mark_mutex even
> +		 * when accessing that list. And freeing mark requires us to drop
> +		 * mark_mutex. So we can reliably free only the first mark in the
> +		 * list. That's why we first move marks to free to to_free list
> +		 * in one go and then free marks in to_free list one by one.
> +		 */
> +		mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
> +		list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
> +			if ((1U << mark->connector->type) & type_mask)
> +				list_move(&mark->g_list, &to_free);
> +		}
> +		mutex_unlock(&group->mark_mutex);
> +	} else {
> +		/* Skip selection step if we want to clear all marks. */
>  		head = &group->marks_list;
> -		goto clear;
>  	}
> -	/*
> -	 * We have to be really careful here. Anytime we drop mark_mutex, e.g.
> -	 * fsnotify_clear_marks_by_inode() can come and free marks. Even in our
> -	 * to_free list so we have to use mark_mutex even when accessing that
> -	 * list. And freeing mark requires us to drop mark_mutex. So we can
> -	 * reliably free only the first mark in the list. That's why we first
> -	 * move marks to free to to_free list in one go and then free marks in
> -	 * to_free list one by one.
> -	 */
> -	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
> -	list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
> -		if ((1U << mark->connector->type) & type_mask)
> -			list_move(&mark->g_list, &to_free);
> -	}
> -	mutex_unlock(&group->mark_mutex);
>
> -clear:
>  	while (1) {
>  		mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
>  		if (list_empty(head)) {
> --
> 2.26.2
>