This is a note to let you know that I've just added the patch titled

    erofs: get rid of erofs_{find,insert}_workgroup

to the 6.12-stable tree which can be found at:

    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     erofs-get-rid-of-erofs_-find-insert-_workgroup.patch
and it can be found in the queue-6.12 subdirectory.

If you, or anyone else, feels it should not be added to the stable
tree, please let <stable@xxxxxxxxxxxxxxx> know about it.


commit df31b3fb4d84962bc7f0268bd82cec1b16996feb
Author: Gao Xiang <xiang@xxxxxxxxxx>
Date:   Mon Oct 21 11:53:21 2024 +0800

    erofs: get rid of erofs_{find,insert}_workgroup

    [ Upstream commit b091e8ed24b7965953147a389bac1dc7c3e8a11c ]

    Just fold them into the only two callers since they are simple
    enough.

    Reviewed-by: Chao Yu <chao@xxxxxxxxxx>
    Signed-off-by: Gao Xiang <hsiangkao@xxxxxxxxxxxxxxxxx>
    Link: https://lore.kernel.org/r/20241021035323.3280682-1-hsiangkao@xxxxxxxxxxxxxxxxx
    Stable-dep-of: db902986dee4 ("erofs: fix potential return value overflow of z_erofs_shrink_scan()")
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 77e785a6dfa7f..b9649e3b2dd56 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -453,10 +453,7 @@ void erofs_release_pages(struct page **pagepool);
 
 #ifdef CONFIG_EROFS_FS_ZIP
 void erofs_workgroup_put(struct erofs_workgroup *grp);
-struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
-					     pgoff_t index);
-struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
-					       struct erofs_workgroup *grp);
+bool erofs_workgroup_get(struct erofs_workgroup *grp);
 void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
 void erofs_shrinker_register(struct super_block *sb);
 void erofs_shrinker_unregister(struct super_block *sb);
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 1a00f061798a3..8c6082fc86b29 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -714,9 +714,10 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
 {
 	struct erofs_map_blocks *map = &fe->map;
 	struct super_block *sb = fe->inode->i_sb;
+	struct erofs_sb_info *sbi = EROFS_SB(sb);
 	bool ztailpacking = map->m_flags & EROFS_MAP_META;
 	struct z_erofs_pcluster *pcl;
-	struct erofs_workgroup *grp;
+	struct erofs_workgroup *grp, *pre;
 	int err;
 
 	if (!(map->m_flags & EROFS_MAP_ENCODED) ||
@@ -752,15 +753,23 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
 		pcl->obj.index = 0;	/* which indicates ztailpacking */
 	} else {
 		pcl->obj.index = erofs_blknr(sb, map->m_pa);
-
-		grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj);
-		if (IS_ERR(grp)) {
-			err = PTR_ERR(grp);
-			goto err_out;
+		while (1) {
+			xa_lock(&sbi->managed_pslots);
+			pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
+					   NULL, grp, GFP_KERNEL);
+			if (!pre || xa_is_err(pre) || erofs_workgroup_get(pre)) {
+				xa_unlock(&sbi->managed_pslots);
+				break;
+			}
+			/* try to legitimize the current in-tree one */
+			xa_unlock(&sbi->managed_pslots);
+			cond_resched();
 		}
-
-		if (grp != &pcl->obj) {
-			fe->pcl = container_of(grp,
+		if (xa_is_err(pre)) {
+			err = xa_err(pre);
+			goto err_out;
+		} else if (pre) {
+			fe->pcl = container_of(pre,
 					struct z_erofs_pcluster, obj);
 			err = -EEXIST;
 			goto err_out;
@@ -789,7 +798,16 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
 	DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
 
 	if (!(map->m_flags & EROFS_MAP_META)) {
-		grp = erofs_find_workgroup(sb, blknr);
+		while (1) {
+			rcu_read_lock();
+			grp = xa_load(&EROFS_SB(sb)->managed_pslots, blknr);
+			if (!grp || erofs_workgroup_get(grp)) {
+				DBG_BUGON(grp && blknr != grp->index);
+				rcu_read_unlock();
+				break;
+			}
+			rcu_read_unlock();
+		}
 	} else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
 		DBG_BUGON(1);
 		return -EFSCORRUPTED;
diff --git a/fs/erofs/zutil.c b/fs/erofs/zutil.c
index 37afe20248409..218b0249a4822 100644
--- a/fs/erofs/zutil.c
+++ b/fs/erofs/zutil.c
@@ -214,7 +214,7 @@ void erofs_release_pages(struct page **pagepool)
 	}
 }
 
-static bool erofs_workgroup_get(struct erofs_workgroup *grp)
+bool erofs_workgroup_get(struct erofs_workgroup *grp)
 {
 	if (lockref_get_not_zero(&grp->lockref))
 		return true;
@@ -231,54 +231,6 @@ static bool erofs_workgroup_get(struct erofs_workgroup *grp)
 	return true;
 }
 
-struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
-					     pgoff_t index)
-{
-	struct erofs_sb_info *sbi = EROFS_SB(sb);
-	struct erofs_workgroup *grp;
-
-repeat:
-	rcu_read_lock();
-	grp = xa_load(&sbi->managed_pslots, index);
-	if (grp) {
-		if (!erofs_workgroup_get(grp)) {
-			/* prefer to relax rcu read side */
-			rcu_read_unlock();
-			goto repeat;
-		}
-
-		DBG_BUGON(index != grp->index);
-	}
-	rcu_read_unlock();
-	return grp;
-}
-
-struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
-					       struct erofs_workgroup *grp)
-{
-	struct erofs_sb_info *const sbi = EROFS_SB(sb);
-	struct erofs_workgroup *pre;
-
-	DBG_BUGON(grp->lockref.count < 1);
-repeat:
-	xa_lock(&sbi->managed_pslots);
-	pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
-			   NULL, grp, GFP_KERNEL);
-	if (pre) {
-		if (xa_is_err(pre)) {
-			pre = ERR_PTR(xa_err(pre));
-		} else if (!erofs_workgroup_get(pre)) {
-			/* try to legitimize the current in-tree one */
-			xa_unlock(&sbi->managed_pslots);
-			cond_resched();
-			goto repeat;
-		}
-		grp = pre;
-	}
-	xa_unlock(&sbi->managed_pslots);
-	return grp;
-}
-
 static void __erofs_workgroup_free(struct erofs_workgroup *grp)
 {
 	atomic_long_dec(&erofs_global_shrink_cnt);