The patch titled
     Subject: mm/various: give up if pte_offset_map[_lock]() fails
has been added to the -mm mm-unstable branch.  Its filename is
     mm-various-give-up-if-pte_offset_map-fails.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-various-give-up-if-pte_offset_map-fails.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Hugh Dickins <hughd@xxxxxxxxxx>
Subject: mm/various: give up if pte_offset_map[_lock]() fails
Date: Thu, 8 Jun 2023 18:29:22 -0700 (PDT)

Following the examples of nearby code, various functions can just give up
if pte_offset_map() or pte_offset_map_lock() fails.  And there's no need
for a preliminary pmd_trans_unstable() or other such check, since such
cases are now safely handled inside.
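For illustration only, here is a minimal sketch of the shape each
conversion below takes (example_pte_walk() is a hypothetical function,
not part of this patch): map the page table, treat NULL as "raced with
collapse or zap" and give up, with no pmd pre-check needed.

	/*
	 * Hypothetical example, not from this patch: the pattern the
	 * converted sites follow.  pte_offset_map_lock() may now return
	 * NULL when the page table has vanished or the pmd is transiently
	 * unstable, so the caller simply gives up instead of doing a
	 * preliminary pmd_trans_unstable() or pmd_bad() check.
	 */
	static int example_pte_walk(struct mm_struct *mm, pmd_t *pmd,
				    unsigned long addr, unsigned long end)
	{
		spinlock_t *ptl;
		pte_t *pte;

		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
		if (!pte)
			return 0;	/* give up: no page table here */
		for (; addr != end; pte++, addr += PAGE_SIZE) {
			/* ... examine *pte ... */
		}
		pte_unmap_unlock(pte - 1, ptl);
		return 0;
	}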
Link: https://lkml.kernel.org/r/7b9bd85d-1652-cbf2-159d-f503b45e5b@xxxxxxxxxx
Signed-off-by: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Alistair Popple <apopple@xxxxxxxxxx>
Cc: Anshuman Khandual <anshuman.khandual@xxxxxxx>
Cc: Axel Rasmussen <axelrasmussen@xxxxxxxxxx>
Cc: Christophe Leroy <christophe.leroy@xxxxxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxxxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: "Huang, Ying" <ying.huang@xxxxxxxxx>
Cc: Ira Weiny <ira.weiny@xxxxxxxxx>
Cc: Jason Gunthorpe <jgg@xxxxxxxx>
Cc: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
Cc: Lorenzo Stoakes <lstoakes@xxxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Cc: Miaohe Lin <linmiaohe@xxxxxxxxxx>
Cc: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
Cc: Mike Rapoport (IBM) <rppt@xxxxxxxxxx>
Cc: Minchan Kim <minchan@xxxxxxxxxx>
Cc: Naoya Horiguchi <naoya.horiguchi@xxxxxxx>
Cc: Pavel Tatashin <pasha.tatashin@xxxxxxxxxx>
Cc: Peter Xu <peterx@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Qi Zheng <zhengqi.arch@xxxxxxxxxxxxx>
Cc: Ralph Campbell <rcampbell@xxxxxxxxxx>
Cc: Ryan Roberts <ryan.roberts@xxxxxxx>
Cc: SeongJae Park <sj@xxxxxxxxxx>
Cc: Song Liu <song@xxxxxxxxxx>
Cc: Steven Price <steven.price@xxxxxxx>
Cc: Suren Baghdasaryan <surenb@xxxxxxxxxx>
Cc: Thomas Hellström <thomas.hellstrom@xxxxxxxxxxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Cc: Yang Shi <shy828301@xxxxxxxxx>
Cc: Yu Zhao <yuzhao@xxxxxxxxxx>
Cc: Zack Rusin <zackr@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/gup.c            |    9 ++++++---
 mm/ksm.c            |    7 ++++---
 mm/memcontrol.c     |    8 ++++----
 mm/memory-failure.c |    8 +++++---
 mm/migrate.c        |    3 +++
 5 files changed, 22 insertions(+), 13 deletions(-)

--- a/mm/gup.c~mm-various-give-up-if-pte_offset_map-fails
+++ a/mm/gup.c
@@ -545,10 +545,10 @@ static struct page *follow_page_pte(stru
 	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
 			 (FOLL_PIN | FOLL_GET)))
 		return ERR_PTR(-EINVAL);
-	if (unlikely(pmd_bad(*pmd)))
-		return no_page_table(vma, flags);
 
 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+	if (!ptep)
+		return no_page_table(vma, flags);
 	pte = *ptep;
 	if (!pte_present(pte))
 		goto no_page;
@@ -852,8 +852,9 @@ static int get_gate_page(struct mm_struc
 	pmd = pmd_offset(pud, address);
 	if (!pmd_present(*pmd))
 		return -EFAULT;
-	VM_BUG_ON(pmd_trans_huge(*pmd));
 	pte = pte_offset_map(pmd, address);
+	if (!pte)
+		return -EFAULT;
 	if (pte_none(*pte))
 		goto unmap;
 	*vma = get_gate_vma(mm);
@@ -2468,6 +2469,8 @@ static int gup_pte_range(pmd_t pmd, pmd_
 	pte_t *ptep, *ptem;
 
 	ptem = ptep = pte_offset_map(&pmd, addr);
+	if (!ptep)
+		return 0;
 	do {
 		pte_t pte = ptep_get_lockless(ptep);
 		struct page *page;
--- a/mm/ksm.c~mm-various-give-up-if-pte_offset_map-fails
+++ a/mm/ksm.c
@@ -431,10 +431,9 @@ static int break_ksm_pmd_entry(pmd_t *pm
 	pte_t *pte;
 	int ret;
 
-	if (pmd_leaf(*pmd) || !pmd_present(*pmd))
-		return 0;
-
 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+	if (!pte)
+		return 0;
 	if (pte_present(*pte)) {
 		page = vm_normal_page(walk->vma, addr, *pte);
 	} else if (!pte_none(*pte)) {
@@ -1203,6 +1202,8 @@ static int replace_page(struct vm_area_s
 	mmu_notifier_invalidate_range_start(&range);
 
 	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
+	if (!ptep)
+		goto out_mn;
 	if (!pte_same(*ptep, orig_pte)) {
 		pte_unmap_unlock(ptep, ptl);
 		goto out_mn;
--- a/mm/memcontrol.c~mm-various-give-up-if-pte_offset_map-fails
+++ a/mm/memcontrol.c
@@ -6021,9 +6021,9 @@ static int mem_cgroup_count_precharge_pt
 		return 0;
 	}
 
-	if (pmd_trans_unstable(pmd))
-		return 0;
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+	if (!pte)
+		return 0;
 	for (; addr != end; pte++, addr += PAGE_SIZE)
 		if (get_mctgt_type(vma, addr, *pte, NULL))
 			mc.precharge++;	/* increment precharge temporarily */
@@ -6241,10 +6241,10 @@ static int mem_cgroup_move_charge_pte_ra
 		return 0;
 	}
 
-	if (pmd_trans_unstable(pmd))
-		return 0;
 retry:
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+	if (!pte)
+		return 0;
 	for (; addr != end; addr += PAGE_SIZE) {
 		pte_t ptent = *(pte++);
 		bool device = false;
--- a/mm/memory-failure.c~mm-various-give-up-if-pte_offset_map-fails
+++ a/mm/memory-failure.c
@@ -405,6 +405,8 @@ static unsigned long dev_pagemap_mapping
 	if (pmd_devmap(*pmd))
 		return PMD_SHIFT;
 	pte = pte_offset_map(pmd, address);
+	if (!pte)
+		return 0;
 	if (pte_present(*pte) && pte_devmap(*pte))
 		ret = PAGE_SHIFT;
 	pte_unmap(pte);
@@ -791,11 +793,11 @@ static int hwpoison_pte_range(pmd_t *pmd
 		goto out;
 	}
 
-	if (pmd_trans_unstable(pmdp))
-		goto out;
-
 	mapped_pte = ptep = pte_offset_map_lock(walk->vma->vm_mm, pmdp,
 						addr, &ptl);
+	if (!ptep)
+		goto out;
+
 	for (; addr != end; ptep++, addr += PAGE_SIZE) {
 		ret = check_hwpoisoned_entry(*ptep, addr, PAGE_SHIFT,
 					     hwp->pfn, &hwp->tk);
--- a/mm/migrate.c~mm-various-give-up-if-pte_offset_map-fails
+++ a/mm/migrate.c
@@ -305,6 +305,9 @@ void migration_entry_wait(struct mm_stru
 	swp_entry_t entry;
 
 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+	if (!ptep)
+		return;
+
 	pte = *ptep;
 	pte_unmap(ptep);
 
_

Patches currently in -mm which might be from hughd@xxxxxxxxxx are

arm-allow-pte_offset_map-to-fail.patch
arm64-allow-pte_offset_map-to-fail.patch
arm64-hugetlb-pte_alloc_huge-pte_offset_huge.patch
ia64-hugetlb-pte_alloc_huge-pte_offset_huge.patch
m68k-allow-pte_offset_map-to-fail.patch
microblaze-allow-pte_offset_map-to-fail.patch
mips-update_mmu_cache-can-replace-__update_tlb.patch
mips-update_mmu_cache-can-replace-__update_tlb-fix.patch
parisc-add-pte_unmap-to-balance-get_ptep.patch
parisc-unmap_uncached_pte-use-pte_offset_kernel.patch
parisc-hugetlb-pte_alloc_huge-pte_offset_huge.patch
powerpc-kvmppc_unmap_free_pmd-pte_offset_kernel.patch
powerpc-allow-pte_offset_map-to-fail.patch
powerpc-hugetlb-pte_alloc_huge.patch
riscv-hugetlb-pte_alloc_huge-pte_offset_huge.patch
s390-allow-pte_offset_map_lock-to-fail.patch
s390-gmap-use-pte_unmap_unlock-not-spin_unlock.patch
sh-hugetlb-pte_alloc_huge-pte_offset_huge.patch
sparc-hugetlb-pte_alloc_huge-pte_offset_huge.patch
sparc-allow-pte_offset_map-to-fail.patch
sparc-iounit-and-iommu-use-pte_offset_kernel.patch
x86-allow-get_locked_pte-to-fail.patch
x86-sme_populate_pgd-use-pte_offset_kernel.patch
xtensa-add-pte_unmap-to-balance-pte_offset_map.patch
mm-use-pmdp_get_lockless-without-surplus-barrier.patch
mm-migrate-remove-cruft-from-migration_entry_waits.patch
mm-pgtable-kmap_local_page-instead-of-kmap_atomic.patch
mm-pgtable-allow-pte_offset_map-to-fail.patch
mm-filemap-allow-pte_offset_map_lock-to-fail.patch
mm-page_vma_mapped-delete-bogosity-in-page_vma_mapped_walk.patch
mm-page_vma_mapped-reformat-map_pte-with-less-indentation.patch
mm-page_vma_mapped-pte_offset_map_nolock-not-pte_lockptr.patch
mm-pagewalkers-action_again-if-pte_offset_map_lock-fails.patch
mm-pagewalk-walk_pte_range-allow-for-pte_offset_map.patch
mm-vmwgfx-simplify-pmd-pud-mapping-dirty-helpers.patch
mm-vmalloc-vmalloc_to_page-use-pte_offset_kernel.patch
mm-hmm-retry-if-pte_offset_map-fails.patch
mm-userfaultfd-retry-if-pte_offset_map-fails.patch
mm-userfaultfd-allow-pte_offset_map_lock-to-fail.patch
mm-debug_vm_pgtablepage_table_check-warn-pte-map-fails.patch
mm-various-give-up-if-pte_offset_map-fails.patch
mm-mprotect-delete-pmd_none_or_clear_bad_unless_trans_huge.patch
mm-mremap-retry-if-either-pte_offset_map_lock-fails.patch
mm-madvise-clean-up-pte_offset_map_lock-scans.patch
mm-madvise-clean-up-force_shm_swapin_readahead.patch
mm-swapoff-allow-pte_offset_map-to-fail.patch
mm-mglru-allow-pte_offset_map_nolock-to-fail.patch
mm-migrate_device-allow-pte_offset_map_lock-to-fail.patch
mm-gup-remove-foll_split_pmd-use-of-pmd_trans_unstable.patch
mm-huge_memory-split-huge-pmd-under-one-pte_offset_map.patch
mm-khugepaged-allow-pte_offset_map-to-fail.patch
mm-memory-allow-pte_offset_map-to-fail.patch
mm-memory-handle_pte_fault-use-pte_offset_map_nolock.patch
mm-pgtable-delete-pmd_trans_unstable-and-friends.patch
mm-swap-swap_vma_readahead-do-the-pte_offset_map.patch
perf-core-allow-pte_offset_map-to-fail.patch