The patch titled
     Subject: mm: clear uffd-wp PTE/PMD state on mremap()
has been added to the -mm mm-hotfixes-unstable branch.  Its filename is
     mm-clear-uffd-wp-pte-pmd-state-on-mremap.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-clear-uffd-wp-pte-pmd-state-on-mremap.patch

This patch will later appear in the mm-hotfixes-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Ryan Roberts <ryan.roberts@xxxxxxx>
Subject: mm: clear uffd-wp PTE/PMD state on mremap()
Date: Tue, 7 Jan 2025 14:47:52 +0000

When mremap()ing a memory region previously registered with userfaultfd
as write-protected but without UFFD_FEATURE_EVENT_REMAP, an inconsistency
in flag clearing leads to a mismatch between the vma flags (which have
uffd-wp cleared) and the pte/pmd flags (which do not have uffd-wp
cleared).  This mismatch causes a subsequent mprotect(PROT_WRITE) to
trigger a warning in page_table_check_pte_flags() due to setting the pte
to writable while uffd-wp is still set.

Fix this by always explicitly clearing the uffd-wp pte/pmd flags on any
such mremap() so that the values are consistent with the existing
clearing of VM_UFFD_WP.  Be careful to clear the logical flag regardless
of its physical form; a PTE bit, a swap PTE bit, or a PTE marker.  Cover
PTE, huge PMD and hugetlb paths.

Link: https://lkml.kernel.org/r/20250107144755.1871363-2-ryan.roberts@xxxxxxx
Co-developed-by: Mikołaj Lenczewski <miko.lenczewski@xxxxxxx>
Signed-off-by: Mikołaj Lenczewski <miko.lenczewski@xxxxxxx>
Signed-off-by: Ryan Roberts <ryan.roberts@xxxxxxx>
Closes: https://lore.kernel.org/linux-mm/810b44a8-d2ae-4107-b665-5a42eae2d948@xxxxxxx/
Fixes: 63b2d4174c4a ("userfaultfd: wp: add the writeprotect API to userfaultfd ioctl")
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: Jann Horn <jannh@xxxxxxxxxx>
Cc: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx>
Cc: Lorenzo Stoakes <lorenzo.stoakes@xxxxxxxxxx>
Cc: Mark Rutland <mark.rutland@xxxxxxx>
Cc: Muchun Song <muchun.song@xxxxxxxxx>
Cc: Peter Xu <peterx@xxxxxxxxxx>
Cc: Shuah Khan <shuah@xxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: <stable@xxxxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/userfaultfd_k.h |   12 ++++++++++++
 mm/huge_memory.c              |   12 ++++++++++++
 mm/hugetlb.c                  |   14 +++++++++++++-
 mm/mremap.c                   |   32 +++++++++++++++++++++++++++++++-
 4 files changed, 68 insertions(+), 2 deletions(-)

--- a/include/linux/userfaultfd_k.h~mm-clear-uffd-wp-pte-pmd-state-on-mremap
+++ a/include/linux/userfaultfd_k.h
@@ -247,6 +247,13 @@ static inline bool vma_can_userfault(str
 		    vma_is_shmem(vma);
 }
 
+static inline bool vma_has_uffd_without_event_remap(struct vm_area_struct *vma)
+{
+	struct userfaultfd_ctx *uffd_ctx = vma->vm_userfaultfd_ctx.ctx;
+
+	return uffd_ctx && (uffd_ctx->features & UFFD_FEATURE_EVENT_REMAP) == 0;
+}
+
 extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *);
 extern void dup_userfaultfd_complete(struct list_head *);
 void dup_userfaultfd_fail(struct list_head *);
@@ -401,6 +408,11 @@ static inline bool userfaultfd_wp_async(
 {
 	return false;
 }
+
+static inline bool vma_has_uffd_without_event_remap(struct vm_area_struct *vma)
+{
+	return false;
+}
 
 #endif /* CONFIG_USERFAULTFD */
--- a/mm/huge_memory.c~mm-clear-uffd-wp-pte-pmd-state-on-mremap
+++ a/mm/huge_memory.c
@@ -2206,6 +2206,16 @@ static pmd_t move_soft_dirty_pmd(pmd_t p
 	return pmd;
 }
 
+static pmd_t clear_uffd_wp_pmd(pmd_t pmd)
+{
+	if (pmd_present(pmd))
+		pmd = pmd_clear_uffd_wp(pmd);
+	else if (is_swap_pmd(pmd))
+		pmd = pmd_swp_clear_uffd_wp(pmd);
+
+	return pmd;
+}
+
 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
 {
@@ -2244,6 +2254,8 @@ bool move_huge_pmd(struct vm_area_struct
 			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
 		}
 		pmd = move_soft_dirty_pmd(pmd);
+		if (vma_has_uffd_without_event_remap(vma))
+			pmd = clear_uffd_wp_pmd(pmd);
 		set_pmd_at(mm, new_addr, new_pmd, pmd);
 		if (force_flush)
 			flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
--- a/mm/hugetlb.c~mm-clear-uffd-wp-pte-pmd-state-on-mremap
+++ a/mm/hugetlb.c
@@ -5402,6 +5402,7 @@ static void move_huge_pte(struct vm_area
 			  unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte,
 			  unsigned long sz)
 {
+	bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma);
 	struct hstate *h = hstate_vma(vma);
 	struct mm_struct *mm = vma->vm_mm;
 	spinlock_t *src_ptl, *dst_ptl;
@@ -5418,7 +5419,18 @@ static void move_huge_pte(struct vm_area
 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
 
 	pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
-	set_huge_pte_at(mm, new_addr, dst_pte, pte, sz);
+
+	if (need_clear_uffd_wp && pte_marker_uffd_wp(pte))
+		huge_pte_clear(mm, new_addr, dst_pte, sz);
+	else {
+		if (need_clear_uffd_wp) {
+			if (pte_present(pte))
+				pte = huge_pte_clear_uffd_wp(pte);
+			else if (is_swap_pte(pte))
+				pte = pte_swp_clear_uffd_wp(pte);
+		}
+		set_huge_pte_at(mm, new_addr, dst_pte, pte, sz);
+	}
 
 	if (src_ptl != dst_ptl)
 		spin_unlock(src_ptl);
--- a/mm/mremap.c~mm-clear-uffd-wp-pte-pmd-state-on-mremap
+++ a/mm/mremap.c
@@ -138,6 +138,7 @@ static int move_ptes(struct vm_area_stru
 		struct vm_area_struct *new_vma, pmd_t *new_pmd,
 		unsigned long new_addr, bool need_rmap_locks)
 {
+	bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma);
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *old_pte, *new_pte, pte;
 	pmd_t dummy_pmdval;
@@ -216,7 +217,18 @@ static int move_ptes(struct vm_area_stru
 				force_flush = true;
 			pte = move_pte(pte, old_addr, new_addr);
 			pte = move_soft_dirty_pte(pte);
-			set_pte_at(mm, new_addr, new_pte, pte);
+
+			if (need_clear_uffd_wp && pte_marker_uffd_wp(pte))
+				pte_clear(mm, new_addr, new_pte);
+			else {
+				if (need_clear_uffd_wp) {
+					if (pte_present(pte))
+						pte = pte_clear_uffd_wp(pte);
+					else if (is_swap_pte(pte))
+						pte = pte_swp_clear_uffd_wp(pte);
+				}
+				set_pte_at(mm, new_addr, new_pte, pte);
+			}
 		}
 
 	arch_leave_lazy_mmu_mode();
@@ -278,6 +290,15 @@ static bool move_normal_pmd(struct vm_ar
 	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
 		return false;
 
+	/* If this pmd belongs to a uffd vma with remap events disabled, we need
+	 * to ensure that the uffd-wp state is cleared from all pgtables. This
+	 * means recursing into lower page tables in move_page_tables(), and we
+	 * can reuse the existing code if we simply treat the entry as "not
+	 * moved".
+	 */
+	if (vma_has_uffd_without_event_remap(vma))
+		return false;
+
 	/*
 	 * We don't have to worry about the ordering of src and dst
 	 * ptlocks because exclusive mmap_lock prevents deadlock.
@@ -333,6 +354,15 @@ static bool move_normal_pud(struct vm_ar
 	if (WARN_ON_ONCE(!pud_none(*new_pud)))
 		return false;
 
+	/* If this pud belongs to a uffd vma with remap events disabled, we need
+	 * to ensure that the uffd-wp state is cleared from all pgtables. This
+	 * means recursing into lower page tables in move_page_tables(), and we
+	 * can reuse the existing code if we simply treat the entry as "not
+	 * moved".
+	 */
+	if (vma_has_uffd_without_event_remap(vma))
+		return false;
+
 	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
_

Patches currently in -mm which might be from ryan.roberts@xxxxxxx are

mm-clear-uffd-wp-pte-pmd-state-on-mremap.patch
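
For reference, the scenario the changelog describes can be sketched from
userspace roughly as follows.  This is an illustrative sketch only, not the
reproducer or the selftest from this series: error handling is omitted, a 4K
base page size is assumed, the two-step mprotect() is simply there to force
the PTEs to be rewritten, and the warning only fires on kernels built with
CONFIG_PAGE_TABLE_CHECK.

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4096;	/* assumes 4K base pages */
	int uffd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);

	/* Handshake *without* requesting UFFD_FEATURE_EVENT_REMAP. */
	struct uffdio_api api = {
		.api = UFFD_API,
		.features = UFFD_FEATURE_PAGEFAULT_FLAG_WP,
	};
	ioctl(uffd, UFFDIO_API, &api);

	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	memset(p, 1, len);		/* populate so PTEs exist */

	/* Register for wp mode and arm uffd-wp on the whole range. */
	struct uffdio_register reg = {
		.range = { .start = (unsigned long)p, .len = len },
		.mode = UFFDIO_REGISTER_MODE_WP,
	};
	ioctl(uffd, UFFDIO_REGISTER, &reg);

	struct uffdio_writeprotect wp = {
		.range = { .start = (unsigned long)p, .len = len },
		.mode = UFFDIO_WRITEPROTECT_MODE_WP,
	};
	ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);

	/*
	 * Force the range to move.  Without UFFD_FEATURE_EVENT_REMAP the
	 * new vma loses VM_UFFD_WP, but (pre-fix) the moved PTEs keep
	 * their uffd-wp bit.
	 */
	char *dst = mmap(NULL, len, PROT_NONE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char *q = mremap(p, len, len, MREMAP_MAYMOVE | MREMAP_FIXED, dst);

	/*
	 * Make change_protection() rewrite the PTEs: drop write, then
	 * restore it.  Pre-fix, making a PTE writable while uffd-wp was
	 * still set triggered the page table check warning.
	 */
	mprotect(q, len, PROT_READ);
	mprotect(q, len, PROT_READ | PROT_WRITE);

	return 0;
}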