From: Pankaj Raghav <p.raghav@xxxxxxxxxxx>

Allocate folios with at least mapping_min_order in page_cache_ra_unbounded()
and page_cache_ra_order() as we need to guarantee a minimum order in the
page cache.

Signed-off-by: Pankaj Raghav <p.raghav@xxxxxxxxxxx>
Signed-off-by: Luis Chamberlain <mcgrof@xxxxxxxxxx>
Acked-by: Darrick J. Wong <djwong@xxxxxxxxxx>
Reviewed-by: Hannes Reinecke <hare@xxxxxxx>
---
 mm/readahead.c | 29 +++++++++++++++++++++++++----
 1 file changed, 25 insertions(+), 4 deletions(-)

diff --git a/mm/readahead.c b/mm/readahead.c
index ef0004147952..73aef3f080ba 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -213,6 +213,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
 	unsigned long index = readahead_index(ractl);
 	gfp_t gfp_mask = readahead_gfp_mask(mapping);
 	unsigned long i = 0;
+	unsigned int min_nrpages = mapping_min_folio_nrpages(mapping);
 
 	/*
 	 * Partway through the readahead operation, we will have added
@@ -234,6 +235,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
 		struct folio *folio = xa_load(&mapping->i_pages, index + i);
 
 		if (folio && !xa_is_value(folio)) {
+			long nr_pages = folio_nr_pages(folio);
+
 			/*
 			 * Page already present? Kick off the current batch
 			 * of contiguous pages before continuing with the
@@ -243,19 +246,31 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
 			 * not worth getting one just for that.
 			 */
 			read_pages(ractl);
-			ractl->_index += folio_nr_pages(folio);
+
+			/*
+			 * Move the ractl->_index by at least min_pages
+			 * if the folio got truncated to respect the
+			 * alignment constraint in the page cache.
+			 *
+			 */
+			if (mapping != folio->mapping)
+				nr_pages = min_nrpages;
+
+			VM_BUG_ON_FOLIO(nr_pages < min_nrpages, folio);
+			ractl->_index += nr_pages;
 			i = ractl->_index + ractl->_nr_pages - index;
 			continue;
 		}
 
-		folio = filemap_alloc_folio(gfp_mask, 0);
+		folio = filemap_alloc_folio(gfp_mask,
+					mapping_min_folio_order(mapping));
 		if (!folio)
 			break;
 		if (filemap_add_folio(mapping, folio, index + i,
 					gfp_mask) < 0) {
 			folio_put(folio);
 			read_pages(ractl);
-			ractl->_index++;
+			ractl->_index += min_nrpages;
 			i = ractl->_index + ractl->_nr_pages - index;
 			continue;
 		}
@@ -503,6 +518,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
 {
 	struct address_space *mapping = ractl->mapping;
 	pgoff_t index = readahead_index(ractl);
+	unsigned int min_order = mapping_min_folio_order(mapping);
 	pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
 	pgoff_t mark = index + ra->size - ra->async_size;
 	int err = 0;
@@ -529,8 +545,13 @@ void page_cache_ra_order(struct readahead_control *ractl,
 		if (index & ((1UL << order) - 1))
 			order = __ffs(index);
 		/* Don't allocate pages past EOF */
-		while (index + (1UL << order) - 1 > limit)
+		while (order > min_order && index + (1UL << order) - 1 > limit)
 			order--;
+
+		if (order < min_order)
+			order = min_order;
+
+		VM_BUG_ON(index & ((1UL << order) - 1));
 		err = ra_alloc_folio(ractl, index, mark, order, gfp);
 		if (err)
 			break;
-- 
2.43.0
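
[Not part of the patch: a small standalone userspace sketch of the
order-clamping arithmetic that the page_cache_ra_order() hunk above
performs, assuming min_order is what mapping_min_folio_order() would
return for the mapping; the function name clamp_ra_order() and the
example values below are made up purely for illustration.]

/* Sketch: shrink a candidate readahead order to stay inside EOF,
 * but never below the mapping's minimum folio order. */
#include <stdio.h>

static unsigned int clamp_ra_order(unsigned long index, unsigned long limit,
				   unsigned int order, unsigned int min_order)
{
	/* Don't allocate pages past EOF (limit), stopping at min_order. */
	while (order > min_order && index + (1UL << order) - 1 > limit)
		order--;

	/* Guarantee at least the minimum order required by the mapping. */
	if (order < min_order)
		order = min_order;

	return order;
}

int main(void)
{
	/* min_order = 2, e.g. 16 KiB minimum folios with 4 KiB pages. */
	printf("%u\n", clamp_ra_order(0, 1, 4, 2));	/* 2: EOF shrinking stops at min_order */
	printf("%u\n", clamp_ra_order(0, 63, 4, 2));	/* 4: fits before EOF, unchanged */
	return 0;
}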