On 06/06/2022 16:03, Hsin-Yi Wang wrote:
Implement the readahead callback for squashfs. It reads the datablocks
that cover the pages in the readahead request. In a few cases the pages
will not be marked uptodate:

- file_end is 0 (the file's data lies entirely in block 0).
- zero-filled blocks.
- the current batch of pages doesn't lie within a single datablock.
- decompressor error.

Otherwise the pages are marked uptodate. Any unhandled pages will be
brought uptodate later by readpage.
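
For reference, the block geometry the implementation relies on works out
as follows (a minimal sketch; the 128 KiB block size and 4 KiB page size
are assumptions for illustration, not part of the patch):

	/* block_log = 17 (128 KiB blocks), PAGE_SHIFT = 12 (4 KiB pages) */
	size_t mask = (1UL << 17) - 1;		/* 0x1ffff: offset within a block */
	unsigned short shift = 17 - 12;		/* 5 */
	unsigned int max_pages = 1U << 5;	/* 32 pages cover one datablock */

	/*
	 * A request at pos 0x21000 for 0x3000 bytes is expanded to the
	 * enclosing block: start = pos & ~mask = 0x20000, and length
	 * (len | mask) + 1 = 0x20000, i.e. exactly one datablock.
	 */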
Suggested-by: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Signed-off-by: Hsin-Yi Wang <hsinyi@xxxxxxxxxxxx>
Reported-by: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Reported-by: Phillip Lougher <phillip@xxxxxxxxxxxxxxx>
Reported-by: Xiongwei Song <Xiongwei.Song@xxxxxxxxxxxxx>
Reported-by: Marek Szyprowski <m.szyprowski@xxxxxxxxxxx>
Reported-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---
v4->v5:
- Handle short file cases reported by Marek and Matthew.
- Fix checkpatch error reported by Andrew.
Thanks for the updated patch. I'm currently testing and
reviewing the patch, and this may take a couple of days.
Phillip
v4: https://lore.kernel.org/lkml/20220601103922.1338320-4-hsinyi@xxxxxxxxxxxx/
v3: https://lore.kernel.org/lkml/20220523065909.883444-4-hsinyi@xxxxxxxxxxxx/
v2: https://lore.kernel.org/lkml/20220517082650.2005840-4-hsinyi@xxxxxxxxxxxx/
v1: https://lore.kernel.org/lkml/20220516105100.1412740-3-hsinyi@xxxxxxxxxxxx/
---
 fs/squashfs/file.c | 124 ++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 123 insertions(+), 1 deletion(-)

diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index a8e495d8eb86..fbd096cd15f4 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -39,6 +39,7 @@
 #include "squashfs_fs_sb.h"
 #include "squashfs_fs_i.h"
 #include "squashfs.h"
+#include "page_actor.h"
 
 /*
  * Locate cache slot in range [offset, index] for specified inode. If
@@ -495,7 +496,128 @@ static int squashfs_read_folio(struct file *file, struct folio *folio)
 	return 0;
 }
 
+static void squashfs_readahead(struct readahead_control *ractl)
+{
+	struct inode *inode = ractl->mapping->host;
+	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+	size_t mask = (1UL << msblk->block_log) - 1;
+	unsigned short shift = msblk->block_log - PAGE_SHIFT;
+	loff_t start = readahead_pos(ractl) & ~mask;
+	size_t len = readahead_length(ractl) + readahead_pos(ractl) - start;
+	struct squashfs_page_actor *actor;
+	unsigned int nr_pages = 0;
+	struct page **pages;
+	int i, file_end = i_size_read(inode) >> msblk->block_log;
+	unsigned int max_pages = 1UL << shift;
+
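+	/* Expand the request to cover whole, block-aligned datablocks */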
+	readahead_expand(ractl, start, (len | mask) + 1);
+
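+	/* file_end == 0: the data lives in block 0 (possibly a fragment); leave it to readpage */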
+	if (file_end == 0)
+		return;
+
+	pages = kmalloc_array(max_pages, sizeof(void *), GFP_KERNEL);
+	if (!pages)
+		return;
+
+	actor = squashfs_page_actor_init_special(pages, max_pages, 0);
+	if (!actor)
+		goto out;
+
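+	/* Process the window one datablock-sized batch of pages at a time */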
+	for (;;) {
+		pgoff_t index;
+		int res, bsize;
+		u64 block = 0;
+		unsigned int expected;
+
+		nr_pages = __readahead_batch(ractl, pages, max_pages);
+		if (!nr_pages)
+			break;
+
+		if (readahead_pos(ractl) >= i_size_read(inode))
+			goto skip_pages;
+
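+		/* The whole batch must lie within a single datablock */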
+		index = pages[0]->index >> shift;
+		if ((pages[nr_pages - 1]->index >> shift) != index)
+			goto skip_pages;
+
+		expected = index == file_end ?
+			   (i_size_read(inode) & (msblk->block_size - 1)) :
+			    msblk->block_size;
+
+		bsize = read_blocklist(inode, index, &block);
+		if (bsize == 0)
+			goto skip_pages;
+
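+		/* Partial batch: read via the block cache and copy out just these pages */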
+		if (nr_pages < max_pages) {
+			struct squashfs_cache_entry *buffer;
+			unsigned int block_mask = max_pages - 1;
+			int offset = pages[0]->index & block_mask;
+
+			buffer = squashfs_get_datablock(inode->i_sb, block,
+							bsize);
+			if (buffer->error) {
+				squashfs_cache_put(buffer);
+				goto skip_pages;
+			}
+
+			expected -= offset * PAGE_SIZE;
+			for (i = 0; i < nr_pages && expected > 0; i++,
+					expected -= PAGE_SIZE, offset++) {
+				int avail = min_t(int, expected, PAGE_SIZE);
+
+				squashfs_fill_page(pages[i], buffer,
+						offset * PAGE_SIZE, avail);
+			}
+
+			squashfs_cache_put(buffer);
+
+			for (i = 0; i < nr_pages; i++) {
+				unlock_page(pages[i]);
+				put_page(pages[i]);
+			}
+			continue;
+		}
+
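+		/* Full batch: decompress the datablock directly into the batched pages */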
+		res = squashfs_read_data(inode->i_sb, block, bsize, NULL,
+					 actor);
+
+		if (res == expected) {
+			int bytes;
+
+			/* Last page may have trailing bytes not filled */
+			bytes = res % PAGE_SIZE;
+			if (bytes) {
+				void *pageaddr;
+
+				pageaddr = kmap_atomic(pages[nr_pages - 1]);
+				memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
+				kunmap_atomic(pageaddr);
+			}
+
+			for (i = 0; i < nr_pages; i++) {
+				flush_dcache_page(pages[i]);
+				SetPageUptodate(pages[i]);
+			}
+		}
+
+		for (i = 0; i < nr_pages; i++) {
+			unlock_page(pages[i]);
+			put_page(pages[i]);
+		}
+	}
+
+	kfree(actor);
+	kfree(pages);
+	return;
+
+skip_pages:
+	for (i = 0; i < nr_pages; i++) {
+		unlock_page(pages[i]);
+		put_page(pages[i]);
+	}
+
+	kfree(actor);
+out:
+	kfree(pages);
+}
+
 const struct address_space_operations squashfs_aops = {
-	.read_folio = squashfs_read_folio
+	.read_folio = squashfs_read_folio,
+	.readahead = squashfs_readahead
 };
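
To exercise the new path, a sequential read of a file on a squashfs
mount should now be served by squashfs_readahead() rather than one
squashfs_read_folio() call per page; hitting the new function can be
confirmed with ftrace. A minimal userspace sketch (the mount point and
file name are assumptions):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* any reasonably large file on a squashfs mount */
		int fd = open("/mnt/squashfs/test.bin", O_RDONLY);
		char buf[65536];
		ssize_t n;

		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* hint sequential access so the kernel reads ahead aggressively */
		posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL);

		while ((n = read(fd, buf, sizeof(buf))) > 0)
			;	/* sequential reads trigger ->readahead */

		close(fd);
		return n < 0;
	}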