On Thu, Feb 3, 2022 at 2:12 PM Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx> wrote:
>
> On Thu, 3 Feb 2022 10:26:41 -0800 Yang Shi <shy828301@xxxxxxxxx> wrote:
>
> > v4: * s/Treated/Treat per David
> >     * Collected acked-by tag from David
> > v3: * Fixed the fix tag, the one used by v2 was not accurate
> >     * Added comment about the risk calling page_mapcount() per David
> >     * Fix pagemap
> > v2: * Added proper fix tag per Jann Horn
> >     * Rebased to the latest linus's tree
>
> The v2->v4 delta shows changes which aren't described above?

They are:

v4: * s/Treated/Treat per David
    * Collected acked-by tag from David
v3: * Fixed the fix tag, the one used by v2 was not accurate
    * Added comment about the risk calling page_mapcount() per David
    * Fix pagemap

> --- a/fs/proc/task_mmu.c~fs-proc-task_mmuc-dont-read-mapcount-for-migration-entry-v4
> +++ a/fs/proc/task_mmu.c
> @@ -469,9 +469,12 @@ static void smaps_account(struct mem_siz
>  	 * If any subpage of the compound page mapped with PTE it would elevate
>  	 * page_count().
>  	 *
> -	 * Treated regular migration entries as mapcount == 1 without reading
> -	 * mapcount since calling page_mapcount() for migration entries is
> -	 * racy against THP splitting.
> +	 * The page_mapcount() is called to get a snapshot of the mapcount.
> +	 * Without holding the page lock this snapshot can be slightly wrong as
> +	 * we cannot always read the mapcount atomically.  It is not safe to
> +	 * call page_mapcount() even with PTL held if the page is not mapped,
> +	 * especially for migration entries.  Treat regular migration entries
> +	 * as mapcount == 1.
>  	 */
>  	if ((page_count(page) == 1) || migration) {
>  		smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
> @@ -1393,6 +1396,7 @@ static pagemap_entry_t pte_to_pagemap_en
>  {
>  	u64 frame = 0, flags = 0;
>  	struct page *page = NULL;
> +	bool migration = false;
> 
>  	if (pte_present(pte)) {
>  		if (pm->show_pfn)
> @@ -1414,13 +1418,14 @@ static pagemap_entry_t pte_to_pagemap_en
>  		frame = swp_type(entry) |
>  			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
>  		flags |= PM_SWAP;
> +		migration = is_migration_entry(entry);
>  		if (is_pfn_swap_entry(entry))
>  			page = pfn_swap_entry_to_page(entry);
>  	}
> 
>  	if (page && !PageAnon(page))
>  		flags |= PM_FILE;
> -	if (page && page_mapcount(page) == 1)
> +	if (page && !migration && page_mapcount(page) == 1)
>  		flags |= PM_MMAP_EXCLUSIVE;
>  	if (vma->vm_flags & VM_SOFTDIRTY)
>  		flags |= PM_SOFT_DIRTY;
> @@ -1436,6 +1441,7 @@ static int pagemap_pmd_range(pmd_t *pmdp
>  	spinlock_t *ptl;
>  	pte_t *pte, *orig_pte;
>  	int err = 0;
> +	bool migration = false;
> 
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>  	ptl = pmd_trans_huge_lock(pmdp, vma);
> @@ -1476,11 +1482,12 @@ static int pagemap_pmd_range(pmd_t *pmdp
>  			if (pmd_swp_uffd_wp(pmd))
>  				flags |= PM_UFFD_WP;
>  			VM_BUG_ON(!is_pmd_migration_entry(pmd));
> +			migration = is_migration_entry(entry);
>  			page = pfn_swap_entry_to_page(entry);
>  		}
>  #endif
> 
> -	if (page && page_mapcount(page) == 1)
> +	if (page && !migration && page_mapcount(page) == 1)
>  		flags |= PM_MMAP_EXCLUSIVE;
> 
>  	for (; addr != end; addr += PAGE_SIZE) {
> _
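
For reference, a minimal user-space sketch (not part of the patch; the PM_* names below are local redefinitions of the bit positions documented in Documentation/admin-guide/mm/pagemap.rst, mirroring the kernel's internal names) showing how the pagemap bits touched above are consumed. With this fix, a page that is under migration reports PM_SWAP set and PM_MMAP_EXCLUSIVE clear, since page_mapcount() is no longer consulted for migration entries:

/*
 * pagemap_demo.c - read one /proc/self/pagemap entry and decode the
 * flag bits affected by this patch.  Bit positions are taken from
 * Documentation/admin-guide/mm/pagemap.rst.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define PM_SOFT_DIRTY		(1ULL << 55)
#define PM_MMAP_EXCLUSIVE	(1ULL << 56)
#define PM_FILE			(1ULL << 61)
#define PM_SWAP			(1ULL << 62)
#define PM_PRESENT		(1ULL << 63)

int main(void)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	char *buf = malloc(pagesize);
	uint64_t ent;
	int fd;

	buf[0] = 1;	/* fault the page in so its entry is populated */

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* One 64-bit entry per virtual page of the process. */
	if (pread(fd, &ent, sizeof(ent),
		  (uintptr_t)buf / pagesize * sizeof(uint64_t)) != sizeof(ent)) {
		perror("pread");
		return 1;
	}

	printf("present %d swap %d file %d exclusive %d soft-dirty %d\n",
	       !!(ent & PM_PRESENT), !!(ent & PM_SWAP), !!(ent & PM_FILE),
	       !!(ent & PM_MMAP_EXCLUSIVE), !!(ent & PM_SOFT_DIRTY));

	close(fd);
	return 0;
}

Before the fix, pte_to_pagemap_entry() and pagemap_pmd_range() could call page_mapcount() on the page behind a migration entry and race with THP split, so the PM_MMAP_EXCLUSIVE bit read by a program like the one above was unreliable for such pages.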