From: Gao Xiang <hsiangkao@xxxxxxxxxx>

commit ce063129181312f8781a047a50be439c5859747b upstream.

Currently, set_bit() & test_bit() pairs are used as a fast path for
initialized configurations. However, these atomic ops are actually
relaxed forms. Instead, load-acquire & store-release forms are needed
to make sure uninitialized fields won't be observed in advance here
(there are no such corresponding bitops, so use full barriers instead).

Link: https://lore.kernel.org/r/20210209130618.15838-1-hsiangkao@xxxxxxx
Fixes: 62dc45979f3f ("staging: erofs: fix race of initializing xattrs of a inode at the same time")
Fixes: 152a333a5895 ("staging: erofs: add compacted compression indexes support")
Cc: <stable@xxxxxxxxxxxxxxx> # 5.3+
Reported-by: Huang Jianan <huangjianan@xxxxxxxx>
Reviewed-by: Chao Yu <yuchao0@xxxxxxxxxx>
Signed-off-by: Gao Xiang <hsiangkao@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 fs/erofs/xattr.c | 10 +++++++++-
 fs/erofs/zmap.c  | 10 +++++++++-
 2 files changed, 18 insertions(+), 2 deletions(-)

--- a/fs/erofs/xattr.c
+++ b/fs/erofs/xattr.c
@@ -48,8 +48,14 @@ static int init_inode_xattrs(struct inod
 	int ret = 0;
 
 	/* the most case is that xattrs of this inode are initialized. */
-	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
+	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags)) {
+		/*
+		 * paired with smp_mb() at the end of the function to ensure
+		 * fields will only be observed after the bit is set.
+		 */
+		smp_mb();
 		return 0;
+	}
 
 	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_XATTR_BIT, TASK_KILLABLE))
 		return -ERESTARTSYS;
@@ -137,6 +143,8 @@ static int init_inode_xattrs(struct inod
 	}
 	xattr_iter_end(&it, atomic_map);
 
+	/* paired with smp_mb() at the beginning of the function. */
+	smp_mb();
 	set_bit(EROFS_I_EA_INITED_BIT, &vi->flags);
 
 out_unlock:
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -36,8 +36,14 @@ static int fill_inode_lazy(struct inode
 	void *kaddr;
 	struct z_erofs_map_header *h;
 
-	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
+	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
+		/*
+		 * paired with smp_mb() at the end of the function to ensure
+		 * fields will only be observed after the bit is set.
+		 */
+		smp_mb();
 		return 0;
+	}
 
 	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
 		return -ERESTARTSYS;
@@ -83,6 +89,8 @@ static int fill_inode_lazy(struct inode
 	vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits +
 					((h->h_clusterbits >> 5) & 7);
 
+	/* paired with smp_mb() at the beginning of the function */
+	smp_mb();
 	set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
 unmap_done:
 	kunmap_atomic(kaddr);
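
Note: the publish/observe pairing the patch relies on can be sketched outside the
kernel. The following is a minimal userspace C11 illustration, not the erofs code;
the names (struct lazy_inode, xattr_count, publish, try_fast_path) are hypothetical.
It shows the load-acquire / store-release shape the commit message asks for, expressed
with acquire/release fences around relaxed flag ops; the patch itself has to fall back
to smp_mb() full barriers because no acquire/release bitops exist.

#include <stdatomic.h>
#include <stdbool.h>

struct lazy_inode {
	int xattr_count;	/* fields filled in by the slow path */
	atomic_bool inited;	/* analogue of EROFS_I_EA_INITED_BIT */
};

/* Writer (slow path): publish the fields, then set the flag. */
static void publish(struct lazy_inode *vi, int count)
{
	vi->xattr_count = count;
	/* paired with the reader's fence in try_fast_path() */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&vi->inited, true, memory_order_relaxed);
}

/* Reader (fast path): test the flag, fence, then read the fields. */
static bool try_fast_path(struct lazy_inode *vi, int *count)
{
	if (!atomic_load_explicit(&vi->inited, memory_order_relaxed))
		return false;	/* not initialized yet, take the slow path */
	/* paired with the writer's fence in publish() */
	atomic_thread_fence(memory_order_acquire);
	*count = vi->xattr_count;
	return true;
}

A reader that observes inited == true synchronizes with the writer through the fence
pair, so it cannot see a stale xattr_count; without the fences (the pre-patch
situation), the relaxed flag ops alone give no such guarantee.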