From: Pankaj Raghav <p.raghav@xxxxxxxxxxx>

Align the indices in filemap_fault() to mapping_min_order number of
pages: round the faulting index down and the i_size-based limit up to
the minimum folio order of the mapping, while the page handed back in
vmf->page is still the exact one for vmf->pgoff within the folio.

Signed-off-by: Pankaj Raghav <p.raghav@xxxxxxxxxxx>
---
 mm/filemap.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index 3853df90f9cf..f97099de80b3 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3288,13 +3288,17 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
 	struct file *file = vmf->vma->vm_file;
 	struct file *fpin = NULL;
 	struct address_space *mapping = file->f_mapping;
+	unsigned int min_order = mapping_min_folio_order(mapping);
+	unsigned int nrpages = 1UL << min_order;
 	struct inode *inode = mapping->host;
-	pgoff_t max_idx, index = vmf->pgoff;
+	pgoff_t max_idx, index = round_down(vmf->pgoff, nrpages);
 	struct folio *folio;
 	vm_fault_t ret = 0;
 	bool mapping_locked = false;
 
 	max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+	max_idx = round_up(max_idx, nrpages);
+
 	if (unlikely(index >= max_idx))
 		return VM_FAULT_SIGBUS;
 
@@ -3386,13 +3390,17 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
 	 * We must recheck i_size under page lock.
 	 */
 	max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+	max_idx = round_up(max_idx, nrpages);
+
 	if (unlikely(index >= max_idx)) {
 		folio_unlock(folio);
 		folio_put(folio);
 		return VM_FAULT_SIGBUS;
 	}
 
-	vmf->page = folio_file_page(folio, index);
+	VM_BUG_ON_FOLIO(folio_order(folio) < min_order, folio);
+
+	vmf->page = folio_file_page(folio, vmf->pgoff);
 	return ret | VM_FAULT_LOCKED;
 
 page_not_uptodate:
-- 
2.40.1
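
For reference, a minimal standalone sketch of the rounding arithmetic the
patch relies on. This is not kernel code: the min_order, pgoff and
"i_size in pages" values are made-up examples, and round_down()/round_up()
are re-defined locally to mirror the kernel's power-of-two rounding
helpers.

#include <stdio.h>

/* Userspace stand-ins for the kernel macros; y must be a power of two. */
#define round_down(x, y)	((x) & ~((__typeof__(x))(y) - 1))
#define round_up(x, y)		((((x) - 1) | ((__typeof__(x))(y) - 1)) + 1)

int main(void)
{
	unsigned int min_order = 2;			/* example: order-2 minimum folio */
	unsigned long nrpages = 1UL << min_order;	/* 4 pages per min-order folio */
	unsigned long pgoff = 7;			/* example faulting page index */
	unsigned long eof_pages = 10;			/* example DIV_ROUND_UP(i_size, PAGE_SIZE) */

	/* Lookup index rounds down to the folio boundary: 7 -> 4. */
	unsigned long index = round_down(pgoff, nrpages);

	/* EOF limit rounds up to the folio boundary: 10 -> 12. */
	unsigned long max_idx = round_up(eof_pages, nrpages);

	printf("index=%lu max_idx=%lu, vmf->page still taken at pgoff=%lu\n",
	       index, max_idx, pgoff);
	return 0;
}

Rounding the lookup index down keeps it aligned to the smallest folio the
mapping can contain, and rounding max_idx up keeps the i_size check from
rejecting an aligned index whose folio straddles EOF; the page installed
in vmf->page is still the exact one for vmf->pgoff.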