Previously, folio_pte_batch() only checked the upper boundary of the folio,
using '>=' for the comparison. This could lead to incorrect behavior when the
next PFN fell below the folio's lower boundary, since in corner cases the next
PFN might belong to a different folio. Check the lower boundary as well, so the
batch stops whenever the next PFN lies outside the folio.

Signed-off-by: Lance Yang <ioworker0@xxxxxxxxx>
---
 mm/memory.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 642b4f2be523..e5291d1e8c37 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -986,12 +986,15 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
 		pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags,
 		bool *any_writable)
 {
-	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
+	unsigned long folio_start_pfn, folio_end_pfn;
 	const pte_t *end_ptep = start_ptep + max_nr;
 	pte_t expected_pte, *ptep;
 	bool writable;
 	int nr;
 
+	folio_start_pfn = folio_pfn(folio);
+	folio_end_pfn = folio_start_pfn + folio_nr_pages(folio);
+
 	if (any_writable)
 		*any_writable = false;
 
@@ -1015,7 +1018,7 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
 		 * corner cases the next PFN might fall into a different
 		 * folio.
 		 */
-		if (pte_pfn(pte) >= folio_end_pfn)
+		if (pte_pfn(pte) >= folio_end_pfn || pte_pfn(pte) < folio_start_pfn)
 			break;
 
 		if (any_writable)
-- 
2.33.1
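
For illustration only, here is a minimal standalone sketch (plain userspace C,
not kernel code) of the two-sided PFN check the patch introduces. The helper
pfn_in_folio_range() and the sample PFN values are hypothetical and exist only
to show that a PFN below the folio's start must stop the batch just like one
past its end:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper mirroring the patched check in folio_pte_batch(). */
static bool pfn_in_folio_range(unsigned long pfn, unsigned long folio_start_pfn,
			       unsigned long folio_nr_pages)
{
	unsigned long folio_end_pfn = folio_start_pfn + folio_nr_pages;

	/* Both bounds matter: the next PFN can fall below the folio as well as past it. */
	return pfn >= folio_start_pfn && pfn < folio_end_pfn;
}

int main(void)
{
	unsigned long folio_start_pfn = 0x1000;	/* hypothetical folio start PFN */
	unsigned long folio_nr_pages = 16;	/* hypothetical folio size in pages */

	printf("below start: %d\n", pfn_in_folio_range(0x0fff, folio_start_pfn, folio_nr_pages));
	printf("inside:      %d\n", pfn_in_folio_range(0x1005, folio_start_pfn, folio_nr_pages));
	printf("past end:    %d\n", pfn_in_folio_range(0x1010, folio_start_pfn, folio_nr_pages));
	return 0;
}

With only the old '>=' test, the first case would wrongly be treated as still
belonging to the folio.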