The patch titled
     Subject: m68k: allow pte_offset_map[_lock]() to fail
has been added to the -mm mm-unstable branch.  Its filename is
     m68k-allow-pte_offset_map-to-fail.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/m68k-allow-pte_offset_map-to-fail.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Hugh Dickins <hughd@xxxxxxxxxx>
Subject: m68k: allow pte_offset_map[_lock]() to fail
Date: Thu, 8 Jun 2023 12:15:16 -0700 (PDT)

In rare transient cases, not yet made possible, pte_offset_map() and
pte_offset_map_lock() may not find a page table: handle appropriately.

Restructure cf_tlb_miss() with a pte_unmap() (previously omitted) at
label out, followed by one local_irq_restore() for all.
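
For reference, the calling convention this implies can be sketched as a
minimal, hypothetical helper (touch_one_pte() below is illustrative only,
not part of this patch): treat a NULL return from pte_offset_map_lock()
as a rare transient failure, and balance every successful map with an
unmap:

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Hypothetical example: mark a present pte young, tolerating failure. */
static int touch_one_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -EAGAIN;	/* no page table here: caller may retry */

	if (pte_present(*pte))
		set_pte(pte, pte_mkyoung(*pte));

	pte_unmap_unlock(pte, ptl);
	return 0;
}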

Link: https://lkml.kernel.org/r/795f6a7-bcca-cdf-ad2a-fbdaa232998c@xxxxxxxxxx
Signed-off-by: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Alexander Gordeev <agordeev@xxxxxxxxxxxxx>
Cc: Alexandre Ghiti <alexghiti@xxxxxxxxxxxx>
Cc: Aneesh Kumar K.V <aneesh.kumar@xxxxxxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Christian Borntraeger <borntraeger@xxxxxxxxxxxxx>
Cc: Chris Zankel <chris@xxxxxxxxxx>
Cc: Claudio Imbrenda <imbrenda@xxxxxxxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: "David S. Miller" <davem@xxxxxxxxxxxxx>
Cc: Geert Uytterhoeven <geert@xxxxxxxxxxxxxx>
Cc: Greg Ungerer <gerg@xxxxxxxxxxxxxx>
Cc: Heiko Carstens <hca@xxxxxxxxxxxxx>
Cc: Helge Deller <deller@xxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxx>
Cc: John David Anglin <dave.anglin@xxxxxxxx>
Cc: John Paul Adrian Glaubitz <glaubitz@xxxxxxxxxxxxxxxxxxx>
Cc: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
Cc: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Cc: Max Filippov <jcmvbkbc@xxxxxxxxx>
Cc: Michael Ellerman <mpe@xxxxxxxxxxxxxx>
Cc: Michal Simek <monstr@xxxxxxxxx>
Cc: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
Cc: Mike Rapoport (IBM) <rppt@xxxxxxxxxx>
Cc: Palmer Dabbelt <palmer@xxxxxxxxxxx>
Cc: Palmer Dabbelt <palmer@xxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Qi Zheng <zhengqi.arch@xxxxxxxxxxxxx>
Cc: Russell King <linux@xxxxxxxxxxxxxxx>
Cc: Suren Baghdasaryan <surenb@xxxxxxxxxx>
Cc: Thomas Bogendoerfer <tsbogend@xxxxxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 arch/m68k/include/asm/mmu_context.h |    6 ++-
 arch/m68k/kernel/sys_m68k.c         |    2 +
 arch/m68k/mm/mcfmmu.c               |   52 ++++++++++----------
 3 files changed, 27 insertions(+), 33 deletions(-)

--- a/arch/m68k/include/asm/mmu_context.h~m68k-allow-pte_offset_map-to-fail
+++ a/arch/m68k/include/asm/mmu_context.h
@@ -99,7 +99,7 @@ static inline void load_ksp_mmu(struct t
 	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *pte;
+	pte_t *pte = NULL;
 	unsigned long mmuar;

 	local_irq_save(flags);
@@ -139,7 +139,7 @@ static inline void load_ksp_mmu(struct t

 	pte = (mmuar >= PAGE_OFFSET) ? pte_offset_kernel(pmd, mmuar)
 				     : pte_offset_map(pmd, mmuar);
-	if (pte_none(*pte) || !pte_present(*pte))
+	if (!pte || pte_none(*pte) || !pte_present(*pte))
 		goto bug;

 	set_pte(pte, pte_mkyoung(*pte));
@@ -161,6 +161,8 @@ static inline void load_ksp_mmu(struct t
 bug:
 	pr_info("ksp load failed: mm=0x%p ksp=0x08%lx\n", mm, mmuar);
 end:
+	if (pte && mmuar < PAGE_OFFSET)
+		pte_unmap(pte);
 	local_irq_restore(flags);
 }

--- a/arch/m68k/kernel/sys_m68k.c~m68k-allow-pte_offset_map-to-fail
+++ a/arch/m68k/kernel/sys_m68k.c
@@ -488,6 +488,8 @@ sys_atomic_cmpxchg_32(unsigned long newv
 		if (!pmd_present(*pmd))
 			goto bad_access;
 		pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
+		if (!pte)
+			goto bad_access;
 		if (!pte_present(*pte) || !pte_dirty(*pte)
 		    || !pte_write(*pte)) {
 			pte_unmap_unlock(pte, ptl);
--- a/arch/m68k/mm/mcfmmu.c~m68k-allow-pte_offset_map-to-fail
+++ a/arch/m68k/mm/mcfmmu.c
@@ -91,7 +91,8 @@ int cf_tlb_miss(struct pt_regs *regs, in
 	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *pte;
+	pte_t *pte = NULL;
+	int ret = -1;
 	int asid;

 	local_irq_save(flags);
@@ -100,47 +101,33 @@ int cf_tlb_miss(struct pt_regs *regs, in
 			regs->pc + (extension_word * sizeof(long));

 	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
-	if (!mm) {
-		local_irq_restore(flags);
-		return -1;
-	}
+	if (!mm)
+		goto out;

 	pgd = pgd_offset(mm, mmuar);
-	if (pgd_none(*pgd)) {
-		local_irq_restore(flags);
-		return -1;
-	}
+	if (pgd_none(*pgd))
+		goto out;

 	p4d = p4d_offset(pgd, mmuar);
-	if (p4d_none(*p4d)) {
-		local_irq_restore(flags);
-		return -1;
-	}
+	if (p4d_none(*p4d))
+		goto out;

 	pud = pud_offset(p4d, mmuar);
-	if (pud_none(*pud)) {
-		local_irq_restore(flags);
-		return -1;
-	}
+	if (pud_none(*pud))
+		goto out;

 	pmd = pmd_offset(pud, mmuar);
-	if (pmd_none(*pmd)) {
-		local_irq_restore(flags);
-		return -1;
-	}
+	if (pmd_none(*pmd))
+		goto out;

 	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
 				: pte_offset_map(pmd, mmuar);
-	if (pte_none(*pte) || !pte_present(*pte)) {
-		local_irq_restore(flags);
-		return -1;
-	}
+	if (!pte || pte_none(*pte) || !pte_present(*pte))
+		goto out;

 	if (write) {
-		if (!pte_write(*pte)) {
-			local_irq_restore(flags);
-			return -1;
-		}
+		if (!pte_write(*pte))
+			goto out;
 		set_pte(pte, pte_mkdirty(*pte));
 	}

@@ -161,9 +148,12 @@ int cf_tlb_miss(struct pt_regs *regs, in
 		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
 	else
 		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);
-
+	ret = 0;
+out:
+	if (pte && !KMAPAREA(mmuar))
+		pte_unmap(pte);
 	local_irq_restore(flags);
-	return 0;
+	return ret;
 }

 void __init cf_bootmem_alloc(void)
_

Patches currently in -mm which might be from hughd@xxxxxxxxxx are

arm-allow-pte_offset_map-to-fail.patch
arm64-allow-pte_offset_map-to-fail.patch
arm64-hugetlb-pte_alloc_huge-pte_offset_huge.patch
ia64-hugetlb-pte_alloc_huge-pte_offset_huge.patch
m68k-allow-pte_offset_map-to-fail.patch
microblaze-allow-pte_offset_map-to-fail.patch
mips-update_mmu_cache-can-replace-__update_tlb.patch
parisc-add-pte_unmap-to-balance-get_ptep.patch
parisc-unmap_uncached_pte-use-pte_offset_kernel.patch
parisc-hugetlb-pte_alloc_huge-pte_offset_huge.patch
powerpc-kvmppc_unmap_free_pmd-pte_offset_kernel.patch
powerpc-allow-pte_offset_map-to-fail.patch
powerpc-hugetlb-pte_alloc_huge.patch
riscv-hugetlb-pte_alloc_huge-pte_offset_huge.patch
s390-allow-pte_offset_map_lock-to-fail.patch
s390-gmap-use-pte_unmap_unlock-not-spin_unlock.patch
sh-hugetlb-pte_alloc_huge-pte_offset_huge.patch
sparc-hugetlb-pte_alloc_huge-pte_offset_huge.patch
sparc-allow-pte_offset_map-to-fail.patch
sparc-iounit-and-iommu-use-pte_offset_kernel.patch
x86-allow-get_locked_pte-to-fail.patch
x86-sme_populate_pgd-use-pte_offset_kernel.patch
xtensa-add-pte_unmap-to-balance-pte_offset_map.patch