Signed-off-by: Jeffle Xu <jefflexu@xxxxxxxxxxxxxxxxx>
---
 fs/erofs/fscache.c  | 73 +++++++++++++++++++++++++++++++++++++++++++++
 fs/erofs/inode.c    |  6 +++-
 fs/erofs/internal.h |  1 +
 3 files changed, 79 insertions(+), 1 deletion(-)

diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
index 6fe31d410cbd..c849d3a89520 100644
--- a/fs/erofs/fscache.c
+++ b/fs/erofs/fscache.c
@@ -1,5 +1,78 @@
 #include "internal.h"
 
+/*
+ * erofs_fscache_readpage
+ *
+ * Copy data from the backing page (bootstrap) into an erofs file's page.
+ */
+static int erofs_fscache_readpage(struct file *file, struct page *page)
+{
+	struct inode *inode = page->mapping->host;
+	struct super_block *sb = inode->i_sb;
+	erofs_off_t pos = (erofs_off_t)page->index << PAGE_SHIFT;
+	struct erofs_map_blocks map = { .m_la = pos };
+	erofs_blk_t blkaddr;
+	struct page *backpage;
+	u64 total, batch, copied = 0;
+	char *vsrc, *vdst;	/* virtual addresses of the mapped src/dst pages */
+	char *psrc, *pdst;	/* cursors inside the src/dst pages */
+	u64 osrc;		/* offset inside the src page */
+	int err;
+
+	err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
+	if (err)
+		goto out;
+
+	total = min_t(u64, PAGE_SIZE, map.m_plen - (pos - map.m_la));
+	blkaddr = map.m_pa >> PAGE_SHIFT;
+	osrc = map.m_pa & (PAGE_SIZE - 1);
+
+	while (total) {
+		backpage = erofs_get_meta_page(sb, blkaddr);
+		if (IS_ERR(backpage)) {
+			err = PTR_ERR(backpage);
+			goto out;
+		}
+
+		vsrc = psrc = kmap_atomic(backpage);
+		vdst = pdst = kmap_atomic(page);
+
+		psrc += osrc;
+		pdst += copied;
+		batch = min_t(u64, PAGE_SIZE - osrc, total);
+
+		memcpy(pdst, psrc, batch);
+
+		copied += batch;
+		total -= batch;
+		blkaddr++;
+		osrc = 0;	/* copy from the beginning of the next backpage */
+
+		/*
+		 * Unmap (in reverse mapping order) before the next iteration
+		 * to avoid a 'scheduling while atomic' error, since
+		 * erofs_get_meta_page() may sleep.
+		 */
+		kunmap_atomic(vdst);
+		kunmap_atomic(vsrc);
+
+		unlock_page(backpage);
+		put_page(backpage);
+	}
+
+out:
+	if (err)
+		SetPageError(page);
+	else
+		SetPageUptodate(page);
+	unlock_page(page);
+	return err;
+}
+
+const struct address_space_operations erofs_fscache_access_aops = {
+	.readpage	= erofs_fscache_readpage,
+};
+
 static int erofs_begin_cache_operation(struct netfs_read_request *rreq)
 {
 	return fscache_begin_read_operation(&rreq->cache_resources,
diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
index 2345f1de438e..452d147277c4 100644
--- a/fs/erofs/inode.c
+++ b/fs/erofs/inode.c
@@ -299,7 +299,11 @@ static int erofs_fill_inode(struct inode *inode, int isdir)
 		err = z_erofs_fill_inode(inode);
 		goto out_unlock;
 	}
-	inode->i_mapping->a_ops = &erofs_raw_access_aops;
+
+	if (inode->i_sb->s_bdev)
+		inode->i_mapping->a_ops = &erofs_raw_access_aops;
+	else
+		inode->i_mapping->a_ops = &erofs_fscache_access_aops;
 
 out_unlock:
 	unlock_page(page);
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index d60d9ffaef2a..dd3f2edae603 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -353,6 +353,7 @@ struct page *erofs_grab_cache_page_nowait(struct address_space *mapping,
 extern const struct super_operations erofs_sops;
 
 extern const struct address_space_operations erofs_raw_access_aops;
+extern const struct address_space_operations erofs_fscache_access_aops;
 extern const struct address_space_operations z_erofs_aops;
 
 /*
-- 
2.27.0