3.19.8-ckt5 -stable review patch.  If anyone has any objections, please let me know.

------------------

From: Jan Kara <jack@xxxxxxx>

commit a2673b6e040663bf16a552f8619e6bde9f4b9acf upstream.

fsnotify_clear_marks_by_group_flags() can race with
fsnotify_destroy_marks() so that when fsnotify_destroy_mark_locked()
drops mark_mutex, a mark from the list iterated by
fsnotify_clear_marks_by_group_flags() can be freed and we dereference
free memory in the loop there.

Fix the problem by keeping mark_mutex held in
fsnotify_destroy_mark_locked(). The reason we drop that mutex is that
we need to call a ->freeing_mark() callback which may acquire
mark_mutex again. To avoid this and similar lock inversion issues, we
move the call to the ->freeing_mark() callback to the kthread
destroying the mark.

Signed-off-by: Jan Kara <jack@xxxxxxx>
Reported-by: Ashish Sangwan <a.sangwan@xxxxxxxxxxx>
Suggested-by: Lino Sanfilippo <LinoSanfilippo@xxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Kamal Mostafa <kamal@xxxxxxxxxxxxx>
---
 fs/notify/mark.c | 34 ++++++++++++++--------------------
 1 file changed, 14 insertions(+), 20 deletions(-)
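
For review, the shape of the fix in one place: fsnotify_destroy_mark_locked()
now keeps mark_mutex held for the whole teardown and only queues the mark on
destroy_list, while the ->freeing_mark() callback (the reason the old code had
to drop the mutex) runs from the fsnotify_mark_destroy() kthread. A minimal
userspace sketch of that pattern follows. It is illustrative only, not kernel
code: every name in it (struct mark, destroy_mark(), reaper(), freeing_cb,
list_lock) is invented for the example, with a pthread mutex standing in for
mark_mutex and a reaper thread standing in for the kthread.

/*
 * Illustrative userspace sketch (not kernel code, all names invented):
 * the destroyer only unlinks an entry and queues it for a dedicated
 * reaper thread, so the list mutex is never dropped mid-teardown and a
 * concurrent iterator can never see a freed entry.  Build: cc -pthread
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct mark {
	struct mark *next;
	void (*freeing_cb)(struct mark *);	/* stands in for ->freeing_mark() */
	int id;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t reap_cv = PTHREAD_COND_INITIALIZER;
static struct mark *live_list;		/* protected by list_lock */
static struct mark *destroy_list;	/* protected by list_lock */
static int done;

static void say_freeing(struct mark *m)
{
	printf("freeing mark %d\n", m->id);
}

/* Unlink a mark and hand it to the reaper without ever dropping the lock. */
static void destroy_mark(struct mark *m)
{
	struct mark **pp;

	pthread_mutex_lock(&list_lock);
	for (pp = &live_list; *pp; pp = &(*pp)->next) {
		if (*pp == m) {
			*pp = m->next;
			break;
		}
	}
	m->next = destroy_list;		/* defer callback + free to the reaper */
	destroy_list = m;
	pthread_cond_signal(&reap_cv);
	pthread_mutex_unlock(&list_lock);
}

/* Reaper thread: runs the callback and frees marks with the lock dropped. */
static void *reaper(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&list_lock);
	while (!done || destroy_list) {
		struct mark *batch;

		while (!destroy_list && !done)
			pthread_cond_wait(&reap_cv, &list_lock);
		batch = destroy_list;
		destroy_list = NULL;
		pthread_mutex_unlock(&list_lock);

		while (batch) {
			struct mark *next = batch->next;
			if (batch->freeing_cb)
				batch->freeing_cb(batch);	/* no list lock held here */
			free(batch);
			batch = next;
		}
		pthread_mutex_lock(&list_lock);
	}
	pthread_mutex_unlock(&list_lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;
	int i;

	pthread_create(&tid, NULL, reaper, NULL);
	for (i = 0; i < 3; i++) {
		struct mark *m = calloc(1, sizeof(*m));
		m->id = i;
		m->freeing_cb = say_freeing;
		pthread_mutex_lock(&list_lock);
		m->next = live_list;
		live_list = m;
		pthread_mutex_unlock(&list_lock);
		destroy_mark(m);	/* lock held across unlink + queue inside */
	}
	pthread_mutex_lock(&list_lock);
	done = 1;
	pthread_cond_signal(&reap_cv);
	pthread_mutex_unlock(&list_lock);
	pthread_join(tid, NULL);
	return 0;
}

The property the sketch demonstrates is the one the patch relies on: the
destroyer never releases the lock between unlinking an entry and queueing it
for the reaper, so a concurrent iterator holding the same lock can never see
an entry that is about to be freed, and the callback runs only in the reaper,
where no list lock is held, so it cannot recreate the lock inversion that
forced the old code to drop mark_mutex.
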

diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 92e48c7..3e594ce4 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -152,31 +152,15 @@ void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
 		BUG();
 
 	list_del_init(&mark->g_list);
-
 	spin_unlock(&mark->lock);
 
 	if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
 		iput(inode);
-	/* release lock temporarily */
-	mutex_unlock(&group->mark_mutex);
 
 	spin_lock(&destroy_lock);
 	list_add(&mark->g_list, &destroy_list);
 	spin_unlock(&destroy_lock);
 	wake_up(&destroy_waitq);
-	/*
-	 * We don't necessarily have a ref on mark from caller so the above destroy
-	 * may have actually freed it, unless this group provides a 'freeing_mark'
-	 * function which must be holding a reference.
-	 */
-
-	/*
-	 * Some groups like to know that marks are being freed.  This is a
-	 * callback to the group function to let it know that this mark
-	 * is being freed.
-	 */
-	if (group->ops->freeing_mark)
-		group->ops->freeing_mark(mark, group);
 
 	/*
 	 * __fsnotify_update_child_dentry_flags(inode);
@@ -191,8 +175,6 @@ void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
 	 */
 
 	atomic_dec(&group->num_marks);
-
-	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
 }
 
 void fsnotify_destroy_mark(struct fsnotify_mark *mark,
@@ -205,7 +187,10 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark,
 
 /*
  * Destroy all marks in the given list. The marks must be already detached from
- * the original inode / vfsmount.
+ * the original inode / vfsmount. Note that we can race with
+ * fsnotify_clear_marks_by_group_flags(). However we hold a reference to each
+ * mark so they won't get freed from under us and nobody else touches our
+ * free_list list_head.
  */
 void fsnotify_destroy_marks(struct list_head *to_free)
 {
@@ -406,7 +391,7 @@ struct fsnotify_mark *fsnotify_find_mark(struct hlist_head *head,
 }
 
 /*
- * clear any marks in a group in which mark->flags & flags is true
+ * Clear any marks in a group in which mark->flags & flags is true.
  */
 void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
 					 unsigned int flags)
@@ -460,6 +445,7 @@ static int fsnotify_mark_destroy(void *ignored)
 {
 	struct fsnotify_mark *mark, *next;
 	struct list_head private_destroy_list;
+	struct fsnotify_group *group;
 
 	for (;;) {
 		spin_lock(&destroy_lock);
@@ -471,6 +457,14 @@ static int fsnotify_mark_destroy(void *ignored)
 		list_for_each_entry_safe(mark, next, &private_destroy_list,
 					 g_list) {
 			list_del_init(&mark->g_list);
+			group = mark->group;
+			/*
+			 * Some groups like to know that marks are being freed.
+			 * This is a callback to the group function to let it
+			 * know that this mark is being freed.
+			 */
+			if (group && group->ops->freeing_mark)
+				group->ops->freeing_mark(mark, group);
 			fsnotify_put_mark(mark);
 		}
 
-- 
1.9.1

--
To unsubscribe from this list: send the line "unsubscribe stable" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html