The quilt patch titled
     Subject: mm/khugepaged: maintain page cache uptodate flag
has been removed from the -mm tree.  Its filename was
     mm-khugepaged-maintain-page-cache-uptodate-flag.patch

This patch was dropped because an updated version will be merged

------------------------------------------------------
From: David Stevens <stevensd@xxxxxxxxxxxx>
Subject: mm/khugepaged: maintain page cache uptodate flag
Date: Tue, 7 Mar 2023 14:20:36 +0900

Make sure that collapse_file doesn't interfere with checking the uptodate
flag in the page cache by only inserting hpage into the page cache after
it has been updated and marked uptodate.  This is achieved by simply not
replacing present pages with hpage when iterating over the target range.

The present pages are already locked, so replacing them with the locked
hpage before the collapse is finalized is unnecessary.  This fixes a race
where folio_seek_hole_data would mistake hpage for a fallocated but
unwritten page.  This race is visible to userspace via data temporarily
disappearing from SEEK_DATA/SEEK_HOLE.

Link: https://lkml.kernel.org/r/20230307052036.1520708-4-stevensd@xxxxxxxxxx
Fixes: f3f0e1d2150b ("khugepaged: add support of collapse for tmpfs/shmem pages")
Signed-off-by: David Stevens <stevensd@xxxxxxxxxxxx>
Acked-by: Peter Xu <peterx@xxxxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Jiaqi Yan <jiaqiyan@xxxxxxxxxx>
Cc: "Kirill A . Shutemov" <kirill@xxxxxxxxxxxxx>
Cc: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Cc: Yang Shi <shy828301@xxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

--- a/mm/khugepaged.c~mm-khugepaged-maintain-page-cache-uptodate-flag
+++ a/mm/khugepaged.c
@@ -1930,12 +1930,6 @@ static int collapse_file(struct mm_struc
 		}
 	} while (1);
 
-	/*
-	 * At this point the hpage is locked and not up-to-date.
-	 * It's safe to insert it into the page cache, because nobody would
-	 * be able to map it or use it in another way until we unlock it.
-	 */
-
 	xas_set(&xas, start);
 	for (index = start; index < end; index++) {
 		page = xas_next(&xas);
@@ -2104,13 +2098,9 @@ static int collapse_file(struct mm_struc
 		}
 
 		/*
-		 * Add the page to the list to be able to undo the collapse if
-		 * something go wrong.
+		 * Accumulate the pages that are being collapsed.
 		 */
 		list_add_tail(&page->lru, &pagelist);
-
-		/* Finally, replace with the new page. */
-		xas_store(&xas, hpage);
 		continue;
out_unlock:
 		unlock_page(page);
@@ -2149,8 +2139,7 @@ xa_unlocked:
 		goto rollback;
 
 	/*
-	 * Replacing old pages with new one has succeeded, now we
-	 * attempt to copy the contents.
+	 * The old pages are locked, so they won't change anymore.
 	 */
 	index = start;
 	list_for_each_entry(page, &pagelist, lru) {
@@ -2230,11 +2219,11 @@ immap_locked:
 		/* nr_none is always 0 for non-shmem. */
 		__mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
 	}
-	/* Join all the small entries into a single multi-index entry. */
-	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
-	xas_store(&xas, hpage);
-	xas_unlock_irq(&xas);
 
+	/*
+	 * Mark hpage as uptodate before inserting it into the page cache so
+	 * that it isn't mistaken for an fallocated but unwritten page.
+	 */
 	folio = page_folio(hpage);
 	folio_mark_uptodate(folio);
 	folio_ref_add(folio, HPAGE_PMD_NR - 1);
@@ -2243,6 +2232,11 @@ immap_locked:
 		folio_mark_dirty(folio);
 	folio_add_lru(folio);
 
+	/* Join all the small entries into a single multi-index entry. */
+	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
+	xas_store(&xas, hpage);
+	xas_unlock_irq(&xas);
+
 	/*
 	 * Remove pte page tables, so we can re-fault the page as huge.
 	 */
@@ -2267,36 +2261,18 @@ immap_locked:
 
rollback:
 	/* Something went wrong: roll back page cache changes */
-	xas_lock_irq(&xas);
 	if (nr_none) {
 		mapping->nrpages -= nr_none;
 		shmem_uncharge(mapping->host, nr_none);
 	}
 
-	xas_set(&xas, start);
-	end = index;
-	for (index = start; index < end; index++) {
-		xas_next(&xas);
-		page = list_first_entry_or_null(&pagelist,
-				struct page, lru);
-		if (!page || xas.xa_index < page->index) {
-			nr_none--;
-			continue;
-		}
-
-		VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
-
+	list_for_each_entry_safe(page, tmp, &pagelist, lru) {
 		/* Unfreeze the page. */
 		list_del(&page->lru);
 		page_ref_unfreeze(page, 2);
-		xas_store(&xas, page);
-		xas_pause(&xas);
-		xas_unlock_irq(&xas);
 		unlock_page(page);
 		putback_lru_page(page);
-		xas_lock_irq(&xas);
 	}
-	VM_BUG_ON(nr_none);
 	/*
 	 * Undo the updates of filemap_nr_thps_inc for non-SHMEM file only.
	 * This undo is not needed unless failure is due to SCAN_COPY_MC.
@@ -2304,8 +2280,6 @@ rollback:
 	if (!is_shmem && result == SCAN_COPY_MC)
 		filemap_nr_thps_dec(mapping);
 
-	xas_unlock_irq(&xas);
-
 	hpage->mapping = NULL;
 
 	unlock_page(hpage);
_

Patches currently in -mm which might be from stevensd@xxxxxxxxxxxx are
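
For background, a minimal userspace sketch of how the SEEK_DATA race fixed
above could be observed.  This is a hypothetical reproducer, not part of the
patch: the tmpfs path, buffer size, and the assumption that khugepaged is
collapsing the file's pages in the background are all illustrative.

#define _GNU_SOURCE	/* SEEK_DATA */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical tmpfs-backed file; tmpfs is eligible for collapse. */
	int fd = open("/dev/shm/seek-race-test", O_RDWR | O_CREAT, 0600);
	char buf[4096];

	if (fd < 0)
		return 1;
	memset(buf, 'x', sizeof(buf));
	/* Put real data at offset 0, so SEEK_DATA from 0 must return 0. */
	if (pwrite(fd, buf, sizeof(buf), 0) != (ssize_t)sizeof(buf))
		return 1;

	/*
	 * Poll while khugepaged collapses this range in the background
	 * (loops until the race trips).  Before the fix, the
	 * not-yet-uptodate hpage was already visible in the page cache,
	 * so folio_seek_hole_data could treat the offset as a hole and
	 * the data transiently disappeared from SEEK_DATA.
	 */
	for (;;) {
		off_t off = lseek(fd, 0, SEEK_DATA);

		if (off != 0) {
			fprintf(stderr, "SEEK_DATA returned %lld\n",
				(long long)off);
			return 1;
		}
	}
}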