Hi Peter,

[...]

> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index ae1f5d0cb581..4b46c099ad94 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -5738,7 +5738,7 @@ static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
>
>  	if (pte_present(ptent))
>  		page = mc_handle_present_pte(vma, addr, ptent);
> -	else if (is_swap_pte(ptent))
> +	else if (pte_has_swap_entry(ptent))
>  		page = mc_handle_swap_pte(vma, ptent, &ent);
>  	else if (pte_none(ptent))
>  		page = mc_handle_file_pte(vma, addr, ptent, &ent);

As I understand things pte_none() == False for a special swap pte, but
shouldn't a special swap pte be treated as pte_none() here? I.e. does this
need to be pte_none(ptent) || is_swap_special_pte() here?

> diff --git a/mm/memory.c b/mm/memory.c
> index 0e0de08a2cd5..998a4f9a3744 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -3491,6 +3491,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>  	if (!pte_unmap_same(vmf))
>  		goto out;
>
> +	/*
> +	 * We should never call do_swap_page upon a swap special pte; just be
> +	 * safe to bail out if it happens.
> +	 */
> +	if (WARN_ON_ONCE(is_swap_special_pte(vmf->orig_pte)))
> +		goto out;
> +
>  	entry = pte_to_swp_entry(vmf->orig_pte);
>  	if (unlikely(non_swap_entry(entry))) {
>  		if (is_migration_entry(entry)) {

Are there other changes required here? Because we can end up with stale
special ptes, and a special pte is !pte_none, don't we need to fix some of
the !pte_none checks in these functions:

insert_pfn() -> checks for !pte_none
remap_pte_range() -> BUG_ON(!pte_none)
apply_to_pte_range() -> didn't check further, but it tests for !pte_none

In general it feels like I might be missing something here though. There are
plenty of pte_none() checks in the kernel which haven't been updated. Is
there some rule that says none of those paths can ever see a special pte?

> diff --git a/mm/migrate.c b/mm/migrate.c
> index 23cbd9de030b..b477d0d5f911 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -294,7 +294,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
>
>  	spin_lock(ptl);
>  	pte = *ptep;
> -	if (!is_swap_pte(pte))
> +	if (!pte_has_swap_entry(pte))
>  		goto out;
>
>  	entry = pte_to_swp_entry(pte);
> @@ -2276,7 +2276,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
>
>  		pte = *ptep;
>
> -		if (pte_none(pte)) {
> +		if (pte_none(pte) || is_swap_special_pte(pte)) {

I was wondering if we can lose the special pte information here? However I
see that migrate_vma_insert_page() checks again and fails the migration if
!pte_none(), so I think this is ok.

I think it would be better if this check were moved below so the migration
fails early (a fuller sketch follows after the quoted hunk). Ie:

	if (pte_none(pte)) {
		if (vma_is_anonymous(vma) && !is_swap_special_pte(pte)) {

Also, how does this work for page migration in general? I can see that
page_vma_mapped_walk() skips special ptes, but doesn't that mean we lose the
special pte in that instance? Or is that ok for some reason?

>  		if (vma_is_anonymous(vma)) {
>  			mpfn = MIGRATE_PFN_MIGRATE;
>  			migrate->cpages++;
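To spell out the suggestion, I mean something roughly like the below. It is
only an untested sketch, and it assumes is_swap_special_pte() from this
series is usable in migrate_vma_collect_pmd():

	if (pte_none(pte) || is_swap_special_pte(pte)) {
		/*
		 * Only a genuinely empty pte is a hole we can report as
		 * migratable; a special pte carries state we must not drop,
		 * so don't set MIGRATE_PFN_MIGRATE for it and let the
		 * address fail to migrate here instead of later in
		 * migrate_vma_insert_page().
		 */
		if (vma_is_anonymous(vma) && !is_swap_special_pte(pte)) {
			mpfn = MIGRATE_PFN_MIGRATE;
			migrate->cpages++;
		}
		goto next;
	}

The end result should match relying on the !pte_none() check in
migrate_vma_insert_page(), just failing earlier.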
> diff --git a/mm/mincore.c b/mm/mincore.c
> index 9122676b54d6..5728c3e6473f 100644
> --- a/mm/mincore.c
> +++ b/mm/mincore.c
> @@ -121,7 +121,7 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
>  	for (; addr != end; ptep++, addr += PAGE_SIZE) {
>  		pte_t pte = *ptep;
>
> -		if (pte_none(pte))
> +		if (pte_none(pte) || is_swap_special_pte(pte))
>  			__mincore_unmapped_range(addr, addr + PAGE_SIZE,
>  						 vma, vec);
>  		else if (pte_present(pte))
> diff --git a/mm/mprotect.c b/mm/mprotect.c
> index 883e2cc85cad..4b743394afbe 100644
> --- a/mm/mprotect.c
> +++ b/mm/mprotect.c
> @@ -139,7 +139,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
>  			}
>  			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
>  			pages++;
> -		} else if (is_swap_pte(oldpte)) {
> +		} else if (pte_has_swap_entry(oldpte)) {
>  			swp_entry_t entry = pte_to_swp_entry(oldpte);
>  			pte_t newpte;
>
> diff --git a/mm/mremap.c b/mm/mremap.c
> index 5989d3990020..122b279333ee 100644
> --- a/mm/mremap.c
> +++ b/mm/mremap.c
> @@ -125,7 +125,7 @@ static pte_t move_soft_dirty_pte(pte_t pte)
>  #ifdef CONFIG_MEM_SOFT_DIRTY
>  	if (pte_present(pte))
>  		pte = pte_mksoft_dirty(pte);
> -	else if (is_swap_pte(pte))
> +	else if (pte_has_swap_entry(pte))
>  		pte = pte_swp_mksoft_dirty(pte);
>  #endif
>  	return pte;
> diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
> index f7b331081791..ff57b67426af 100644
> --- a/mm/page_vma_mapped.c
> +++ b/mm/page_vma_mapped.c
> @@ -36,7 +36,7 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
>  	 * For more details on device private memory see HMM
>  	 * (include/linux/hmm.h or mm/hmm.c).
>  	 */
> -	if (is_swap_pte(*pvmw->pte)) {
> +	if (pte_has_swap_entry(*pvmw->pte)) {
>  		swp_entry_t entry;
>
>  		/* Handle un-addressable ZONE_DEVICE memory */
> @@ -90,7 +90,7 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
>
>  	if (pvmw->flags & PVMW_MIGRATION) {
>  		swp_entry_t entry;
> -		if (!is_swap_pte(*pvmw->pte))
> +		if (!pte_has_swap_entry(*pvmw->pte))
>  			return false;
>  		entry = pte_to_swp_entry(*pvmw->pte);
>
> @@ -99,7 +99,7 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
>  			return false;
>
>  		pfn = swp_offset(entry);
> -	} else if (is_swap_pte(*pvmw->pte)) {
> +	} else if (pte_has_swap_entry(*pvmw->pte)) {
>  		swp_entry_t entry;
>
>  		/* Handle un-addressable ZONE_DEVICE memory */
> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index 1e07d1c776f2..4993b4454c13 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c
> @@ -1951,7 +1951,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
>  	si = swap_info[type];
>  	pte = pte_offset_map(pmd, addr);
>  	do {
> -		if (!is_swap_pte(*pte))
> +		if (!pte_has_swap_entry(*pte))
>  			continue;
>
>  		entry = pte_to_swp_entry(*pte);
>
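One more thought on the pte_none() question above: if some of those call
sites do need updating, a small helper might make them easier to audit.
Purely illustrative (the name below is made up and not part of this series):

	/*
	 * True when the caller should treat this address as having nothing
	 * mapped, i.e. the pte is empty or only carries a swap special
	 * marker.
	 */
	static inline bool pte_none_or_swap_special(pte_t pte)
	{
		return pte_none(pte) || is_swap_special_pte(pte);
	}

The pte_none(ptent) branch in get_mctgt_type() quoted above could then use
it, for example.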