From: Luis Chamberlain <mcgrof@xxxxxxxxxx>

Supporting mapping_min_order implies that we guarantee each folio in
the page cache has at least an order of mapping_min_order. So when
adding new folios to the page cache we must ensure the index used is
aligned to mapping_min_order, as the page cache requires the index to
be aligned to the order of the folio.

A folio of higher order than min_order is by definition a multiple of
min_order in size, so an index aligned to an order higher than
min_order is also aligned to min_order.

When min order is not set, this effectively introduces no functional
change other than a few rounding computations that result in the same
value.

Signed-off-by: Luis Chamberlain <mcgrof@xxxxxxxxxx>
Signed-off-by: Pankaj Raghav <p.raghav@xxxxxxxxxxx>
---
 include/linux/pagemap.h |  8 ++++++++
 mm/filemap.c            | 22 +++++++++++++---------
 2 files changed, 21 insertions(+), 9 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index fc8eb9c94e9c..fe8e1fbb667d 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -1328,6 +1328,14 @@ struct readahead_control {
 		._index = i,						\
 	}
 
+#define DEFINE_READAHEAD_ALIGNED(ractl, f, r, m, i)			\
+	struct readahead_control ractl = {				\
+		.file = f,						\
+		.mapping = m,						\
+		.ra = r,						\
+		._index = mapping_align_start_index(m, i),		\
+	}
+
 #define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)
 
 void page_cache_ra_unbounded(struct readahead_control *,
diff --git a/mm/filemap.c b/mm/filemap.c
index 2b00442b9d19..bdf4f65f597c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2478,11 +2478,11 @@ static int filemap_get_pages(struct kiocb *iocb, size_t count,
 	struct file *filp = iocb->ki_filp;
 	struct address_space *mapping = filp->f_mapping;
 	struct file_ra_state *ra = &filp->f_ra;
-	pgoff_t index = iocb->ki_pos >> PAGE_SHIFT;
-	pgoff_t last_index;
+	pgoff_t index, last_index;
 	struct folio *folio;
 	int err = 0;
 
+	index = mapping_align_start_index(mapping, iocb->ki_pos >> PAGE_SHIFT);
 	/* "last_index" is the index of the page beyond the end of the read */
 	last_index = DIV_ROUND_UP(iocb->ki_pos + count, PAGE_SIZE);
 retry:
@@ -2500,8 +2500,7 @@ static int filemap_get_pages(struct kiocb *iocb, size_t count,
 	if (!folio_batch_count(fbatch)) {
 		if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
 			return -EAGAIN;
-		err = filemap_create_folio(filp, mapping,
-				iocb->ki_pos >> PAGE_SHIFT, fbatch);
+		err = filemap_create_folio(filp, mapping, index, fbatch);
 		if (err == AOP_TRUNCATED_PAGE)
 			goto retry;
 		return err;
@@ -3093,7 +3092,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 	struct file *file = vmf->vma->vm_file;
 	struct file_ra_state *ra = &file->f_ra;
 	struct address_space *mapping = file->f_mapping;
-	DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
+	DEFINE_READAHEAD_ALIGNED(ractl, file, ra, mapping, vmf->pgoff);
 	struct file *fpin = NULL;
 	unsigned long vm_flags = vmf->vma->vm_flags;
 	unsigned int mmap_miss;
@@ -3147,7 +3146,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 	ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
 	ra->size = ra->ra_pages;
 	ra->async_size = ra->ra_pages / 4;
-	ractl._index = ra->start;
+	ractl._index = mapping_align_start_index(mapping, ra->start);
 	page_cache_ra_order(&ractl, ra, 0);
 	return fpin;
 }
@@ -3162,7 +3161,7 @@
 {
 	struct file *file = vmf->vma->vm_file;
 	struct file_ra_state *ra = &file->f_ra;
-	DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff);
+	DEFINE_READAHEAD_ALIGNED(ractl, file, ra, file->f_mapping, vmf->pgoff);
 	struct file *fpin = NULL;
 	unsigned int mmap_miss;
 
@@ -3211,11 +3210,12 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
 	struct file *fpin = NULL;
 	struct address_space *mapping = file->f_mapping;
 	struct inode *inode = mapping->host;
-	pgoff_t max_idx, index = vmf->pgoff;
+	pgoff_t max_idx, index;
 	struct folio *folio;
 	vm_fault_t ret = 0;
 	bool mapping_locked = false;
 
+	index = mapping_align_start_index(mapping, vmf->pgoff);
 	max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 	if (unlikely(index >= max_idx))
 		return VM_FAULT_SIGBUS;
@@ -3321,7 +3321,10 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 	}
 
-	vmf->page = folio_file_page(folio, index);
+	VM_BUG_ON_FOLIO(folio_order(folio) < mapping_min_folio_order(mapping),
+			folio);
+
+	vmf->page = folio_file_page(folio, vmf->pgoff);
 	return ret | VM_FAULT_LOCKED;
 
 page_not_uptodate:
@@ -3657,6 +3660,7 @@ static struct folio *do_read_cache_folio(struct address_space *mapping,
 	struct folio *folio;
 	int err;
 
+	index = mapping_align_start_index(mapping, index);
 	if (!filler)
 		filler = mapping->a_ops->read_folio;
 repeat:
-- 
2.43.0
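
The alignment the commit message describes amounts to rounding a page
index down to the start of the minimum-order folio that contains it.
A minimal userspace sketch of that idea, with a hypothetical struct
mapping and align_start_index() standing in for the real struct
address_space and its mapping_align_start_index() helper:

	#include <stdio.h>

	/*
	 * Hypothetical stand-in for struct address_space; only the
	 * minimum folio order matters for this sketch.
	 */
	struct mapping {
		unsigned int min_order;
	};

	/*
	 * Round index down to a min_order boundary. Orders are powers
	 * of two, so an index aligned to any order >= min_order is
	 * automatically aligned to min_order as well.
	 */
	static unsigned long align_start_index(const struct mapping *m,
					       unsigned long index)
	{
		unsigned long nrpages = 1UL << m->min_order;

		return index & ~(nrpages - 1);
	}

	int main(void)
	{
		struct mapping m = { .min_order = 2 };	/* 4-page min folios */

		printf("%lu\n", align_start_index(&m, 7));	/* prints 4 */

		m.min_order = 0;	/* min order not set */
		printf("%lu\n", align_start_index(&m, 7));	/* prints 7 */
		return 0;
	}

With min_order == 0 the mask is a no-op and every index comes back
unchanged, which is why the non-min-order case only gains rounding
computations that result in the same value.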