Some filesystems (e.g. jffs2) lock the same resources for both readdir
and lookup, which deadlocks ovl_dir_read_merged(): it calls lookup from
inside the filldir callback on the upper filesystem to identify
whiteout entries.

Fix this by moving all links to the head of the list during readdir and
marking whiteouts in a separate pass afterwards.

Signed-off-by: Felix Fietkau <nbd@xxxxxxxxxxx>
---
 fs/overlayfs/overlayfs.c |   61 +++++++++++++++++++++++++++-------------------
 1 files changed, 36 insertions(+), 25 deletions(-)

diff --git a/fs/overlayfs/overlayfs.c b/fs/overlayfs/overlayfs.c
index dca953e..10ca9c0 100644
--- a/fs/overlayfs/overlayfs.c
+++ b/fs/overlayfs/overlayfs.c
@@ -253,8 +253,7 @@ static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
 }
 
 static struct ovl_cache_entry *ovl_cache_entry_new(const char *name, int len,
-                                                   u64 ino, unsigned int d_type,
-                                                   bool is_whiteout)
+                                                   u64 ino, unsigned int d_type)
 {
         struct ovl_cache_entry *p;
 
@@ -267,7 +266,7 @@ static struct ovl_cache_entry *ovl_cache_entry_new(const char *name, int len,
                 p->len = len;
                 p->type = d_type;
                 p->ino = ino;
-                p->is_whiteout = is_whiteout;
+                p->is_whiteout = false;
         }
 
         return p;
@@ -275,7 +274,7 @@ static struct ovl_cache_entry *ovl_cache_entry_new(const char *name, int len,
 
 static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
                                   const char *name, int len, u64 ino,
-                                  unsigned int d_type, bool is_whiteout)
+                                  unsigned int d_type)
 {
         struct rb_node **newp = &rdd->root->rb_node;
         struct rb_node *parent = NULL;
@@ -296,11 +295,18 @@ static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
                         return 0;
         }
 
-        p = ovl_cache_entry_new(name, len, ino, d_type, is_whiteout);
+        p = ovl_cache_entry_new(name, len, ino, d_type);
         if (p == NULL)
                 return -ENOMEM;
 
-        list_add_tail(&p->l_node, rdd->list);
+        /*
+         * Add links before other types to be able to quickly mark
+         * any whiteout entries
+         */
+        if (d_type == DT_LNK)
+                list_add(&p->l_node, rdd->list);
+        else
+                list_add_tail(&p->l_node, rdd->list);
         rb_link_node(&p->node, parent, newp);
         rb_insert_color(&p->node, rdd->root);
 
@@ -318,7 +324,7 @@ static int ovl_fill_lower(void *buf, const char *name, int namelen,
         if (p) {
                 list_move_tail(&p->l_node, rdd->middle);
         } else {
-                p = ovl_cache_entry_new(name, namelen, ino, d_type, false);
+                p = ovl_cache_entry_new(name, namelen, ino, d_type);
                 if (p == NULL)
                         rdd->err = -ENOMEM;
                 else
@@ -343,26 +349,9 @@ static int ovl_fill_upper(void *buf, const char *name, int namelen,
                           loff_t offset, u64 ino, unsigned int d_type)
 {
         struct ovl_readdir_data *rdd = buf;
-        bool is_whiteout = false;
 
         rdd->count++;
-        if (d_type == DT_LNK) {
-                struct dentry *dentry;
-
-                dentry = lookup_one_len(name, rdd->dir, namelen);
-                if (IS_ERR(dentry)) {
-                        rdd->err = PTR_ERR(dentry);
-                        goto out;
-                }
-                is_whiteout = ovl_is_whiteout(dentry);
-                dput(dentry);
-        }
-
-        rdd->err = ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type,
-                                          is_whiteout);
-
-out:
-        return rdd->err;
+        return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type);
 }
 
 static int ovl_dir_read(struct path *realpath, struct ovl_readdir_data *rdd,
@@ -428,6 +417,26 @@ static void ovl_dir_reset(struct file *file)
         }
 }
 
+static void ovl_dir_mark_whiteouts(struct ovl_readdir_data *rdd)
+{
+        struct ovl_cache_entry *p;
+        struct dentry *dentry;
+
+        mutex_lock(&rdd->dir->d_inode->i_mutex);
+        list_for_each_entry(p, rdd->list, l_node) {
+                if (p->type != DT_LNK)
+                        break;
+
+                dentry = lookup_one_len(p->name, rdd->dir, p->len);
+                if (IS_ERR(dentry))
+                        continue;
+
+                p->is_whiteout = ovl_is_whiteout(dentry);
+                dput(dentry);
+        }
+        mutex_unlock(&rdd->dir->d_inode->i_mutex);
+}
+
 static int ovl_dir_read_merged(struct path *upperpath, struct path *lowerpath,
                                struct ovl_readdir_data *rdd)
 {
@@ -441,6 +450,8 @@ static int ovl_dir_read_merged(struct path *upperpath, struct path *lowerpath,
                 err = ovl_dir_read(upperpath, rdd, ovl_fill_upper);
                 if (err)
                         goto out;
+
+                ovl_dir_mark_whiteouts(rdd);
         }
         /*
         * Insert lowerpath entries before upperpath ones, this allows

-- 
1.7.3.2
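
A note on the technique, for readers new to the readdir cache: the patch
clusters all DT_LNK entries at the head of the merge list while filling
it, so the post-readdir whiteout pass can stop at the first non-link
entry and never needs to issue a lookup inside the filldir callback.
Below is a minimal, self-contained userspace C sketch of that ordering
idea. It is an illustration only, not the kernel code: ET_LNK/ET_REG,
the hand-rolled list, and the "(wh)" name check are invented stand-ins
for DT_LNK, the ovl cache list, and the lookup_one_len()/
ovl_is_whiteout() pair.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum etype { ET_LNK, ET_REG };

struct entry {
        enum etype type;
        const char *name;
        bool is_whiteout;
        struct entry *next;
};

static struct entry *head, *tail;

/* Mirrors the insert-time ordering: links go to the head of the list,
 * everything else to the tail. */
static void add_entry(enum etype type, const char *name)
{
        struct entry *p = calloc(1, sizeof(*p));

        if (!p)
                abort();
        p->type = type;
        p->name = name;
        if (type == ET_LNK) {
                p->next = head;
                head = p;
                if (!tail)
                        tail = p;
        } else {
                if (tail)
                        tail->next = p;
                else
                        head = p;
                tail = p;
        }
}

/* Mirrors the post-readdir pass: every link sits at the head, so the
 * scan can stop at the first non-link entry. */
static void mark_whiteouts(void)
{
        struct entry *p;

        for (p = head; p; p = p->next) {
                if (p->type != ET_LNK)
                        break;
                /* stand-in for the lookup_one_len()/ovl_is_whiteout() pair */
                p->is_whiteout = strncmp(p->name, "(wh)", 4) == 0;
        }
}

int main(void)
{
        struct entry *p;

        add_entry(ET_REG, "file1");
        add_entry(ET_LNK, "(wh)deleted");
        add_entry(ET_LNK, "symlink");

        mark_whiteouts();
        for (p = head; p; p = p->next)
                printf("%-12s%s\n", p->name, p->is_whiteout ? " [whiteout]" : "");
        return 0;
}

The design point is the same as in the patch: ordering the list at
insert time makes the marking pass cost proportional to the number of
links, and moves every lookup out of the filldir callback, where
filesystems like jffs2 hold the same lock for readdir and lookup.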