From: Pankaj Raghav <p.raghav@xxxxxxxxxxx>

Align the index to mapping_min_order in force_page_cache_ra(). This
ensures that the folios allocated for readahead and added to the page
cache are aligned to mapping_min_order.

Signed-off-by: Pankaj Raghav <p.raghav@xxxxxxxxxxx>
---
 mm/readahead.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/mm/readahead.c b/mm/readahead.c
index 2a9e9020b7cf..838dd9ca8dad 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -318,6 +318,8 @@ void force_page_cache_ra(struct readahead_control *ractl,
 	struct file_ra_state *ra = ractl->ra;
 	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
 	unsigned long max_pages, index;
+	unsigned int folio_order = mapping_min_folio_order(mapping);
+	unsigned int nr_of_pages = (1 << folio_order);
 
 	if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead))
 		return;
@@ -327,6 +329,13 @@ void force_page_cache_ra(struct readahead_control *ractl,
 	 * be up to the optimal hardware IO size
 	 */
 	index = readahead_index(ractl);
+	if (folio_order && (index & (nr_of_pages - 1))) {
+		unsigned long old_index = index;
+
+		index = round_down(index, nr_of_pages);
+		nr_to_read += (old_index - index);
+	}
+
 	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
 	nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
 	while (nr_to_read) {
@@ -335,6 +344,7 @@ void force_page_cache_ra(struct readahead_control *ractl,
 		if (this_chunk > nr_to_read)
 			this_chunk = nr_to_read;
 		ractl->_index = index;
+		VM_BUG_ON(index & (nr_of_pages - 1));
 		do_page_cache_ra(ractl, this_chunk, 0);
 
 		index += this_chunk;
-- 
2.40.1
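
[Editor's note, not part of the patch: below is a minimal standalone
sketch of the alignment arithmetic in the second hunk. The chosen min
folio order (2, i.e. 4 pages) and the starting index/length are made-up
example values, and round_down() is a userspace stand-in for the kernel
macro; it only illustrates how the start index is rounded down and
nr_to_read grown so the original range is still covered.]

#include <stdio.h>

/* stand-in for the kernel's round_down() for power-of-two alignment */
#define round_down(x, y) ((x) & ~((unsigned long)(y) - 1))

int main(void)
{
	unsigned int folio_order = 2;               /* assumed min order: 4 pages */
	unsigned long nr_of_pages = 1UL << folio_order;
	unsigned long index = 7, nr_to_read = 10;   /* hypothetical request */

	if (folio_order && (index & (nr_of_pages - 1))) {
		unsigned long old_index = index;

		index = round_down(index, nr_of_pages);
		nr_to_read += old_index - index;    /* still covers the old range */
	}

	/* prints: index=4 nr_to_read=13 */
	printf("index=%lu nr_to_read=%lu\n", index, nr_to_read);
	return 0;
}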