On Mon, Apr 25, 2022 at 08:21:42PM +0800, Jeffle Xu wrote:
> Implement fscache-based data readahead. Also registers an individual
                                               ^ register
> bdi for each erofs instance to enable readahead.
> 
> Signed-off-by: Jeffle Xu <jefflexu@xxxxxxxxxxxxxxxxx>

Reviewed-by: Gao Xiang <hsiangkao@xxxxxxxxxxxxxxxxx>

Thanks,
Gao Xiang

> ---
>  fs/erofs/fscache.c | 90 ++++++++++++++++++++++++++++++++++++++++++++++
>  fs/erofs/super.c   |  4 +++
>  2 files changed, 94 insertions(+)
> 
> diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
> index 5b779812a5ee..a402d8f0a063 100644
> --- a/fs/erofs/fscache.c
> +++ b/fs/erofs/fscache.c
> @@ -162,12 +162,102 @@ static int erofs_fscache_readpage(struct file *file, struct page *page)
>  	return ret;
>  }
>  
> +static void erofs_fscache_unlock_folios(struct readahead_control *rac,
> +					size_t len)
> +{
> +	while (len) {
> +		struct folio *folio = readahead_folio(rac);
> +
> +		len -= folio_size(folio);
> +		folio_mark_uptodate(folio);
> +		folio_unlock(folio);
> +	}
> +}
> +
> +static void erofs_fscache_readahead(struct readahead_control *rac)
> +{
> +	struct inode *inode = rac->mapping->host;
> +	struct super_block *sb = inode->i_sb;
> +	size_t len, count, done = 0;
> +	erofs_off_t pos;
> +	loff_t start, offset;
> +	int ret;
> +
> +	if (!readahead_count(rac))
> +		return;
> +
> +	start = readahead_pos(rac);
> +	len = readahead_length(rac);
> +
> +	do {
> +		struct erofs_map_blocks map;
> +		struct erofs_map_dev mdev;
> +
> +		pos = start + done;
> +		map.m_la = pos;
> +
> +		ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
> +		if (ret)
> +			return;
> +
> +		offset = start + done;
> +		count = min_t(size_t, map.m_llen - (pos - map.m_la),
> +			      len - done);
> +
> +		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
> +			struct iov_iter iter;
> +
> +			iov_iter_xarray(&iter, READ, &rac->mapping->i_pages,
> +					offset, count);
> +			iov_iter_zero(count, &iter);
> +
> +			erofs_fscache_unlock_folios(rac, count);
> +			ret = count;
> +			continue;
> +		}
> +
> +		if (map.m_flags & EROFS_MAP_META) {
> +			struct folio *folio = readahead_folio(rac);
> +
> +			ret = erofs_fscache_readpage_inline(folio, &map);
> +			if (!ret) {
> +				folio_mark_uptodate(folio);
> +				ret = folio_size(folio);
> +			}
> +
> +			folio_unlock(folio);
> +			continue;
> +		}
> +
> +		mdev = (struct erofs_map_dev) {
> +			.m_deviceid = map.m_deviceid,
> +			.m_pa = map.m_pa,
> +		};
> +		ret = erofs_map_dev(sb, &mdev);
> +		if (ret)
> +			return;
> +
> +		ret = erofs_fscache_read_folios(mdev.m_fscache->cookie,
> +				rac->mapping, offset, count,
> +				mdev.m_pa + (pos - map.m_la));
> +		/*
> +		 * For the error cases, the folios will be unlocked when
> +		 * .readahead() returns.
> +		 */
> +		if (!ret) {
> +			erofs_fscache_unlock_folios(rac, count);
> +			ret = count;
> +		}
> +	} while (ret > 0 && ((done += ret) < len));
> +}
> +
>  static const struct address_space_operations erofs_fscache_meta_aops = {
>  	.readpage = erofs_fscache_meta_readpage,
>  };
>  
>  const struct address_space_operations erofs_fscache_access_aops = {
>  	.readpage = erofs_fscache_readpage,
> +	.readahead = erofs_fscache_readahead,
>  };
>  
>  int erofs_fscache_register_cookie(struct super_block *sb,
> diff --git a/fs/erofs/super.c b/fs/erofs/super.c
> index c6755bcae4a6..f68ba929100d 100644
> --- a/fs/erofs/super.c
> +++ b/fs/erofs/super.c
> @@ -619,6 +619,10 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
>  					    sbi->opt.fsid, true);
>  		if (err)
>  			return err;
> +
> +		err = super_setup_bdi(sb);
> +		if (err)
> +			return err;
>  	} else {
>  		if (!sb_set_blocksize(sb, EROFS_BLKSIZ)) {
>  			erofs_err(sb, "failed to set erofs blksize");
> -- 
> 2.27.0