The quilt patch titled
     Subject: arm64/mm: convert READ_ONCE(*ptep) to ptep_get(ptep)
has been removed from the -mm tree.  Its filename was
     arm64-mm-convert-read_onceptep-to-ptep_getptep.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: Ryan Roberts <ryan.roberts@xxxxxxx>
Subject: arm64/mm: convert READ_ONCE(*ptep) to ptep_get(ptep)
Date: Thu, 15 Feb 2024 10:31:54 +0000

There are a number of places in the arch code that read a pte by using
the READ_ONCE() macro.  Refactor these call sites to instead use the
ptep_get() helper, which itself is a READ_ONCE().  Generated code should
be the same.

This will benefit us when we shortly introduce the transparent contpte
support.  In this case, ptep_get() will become more complex so we now
have all the code abstracted through it.

Link: https://lkml.kernel.org/r/20240215103205.2607016-8-ryan.roberts@xxxxxxx
Signed-off-by: Ryan Roberts <ryan.roberts@xxxxxxx>
Tested-by: John Hubbard <jhubbard@xxxxxxxxxx>
Acked-by: Mark Rutland <mark.rutland@xxxxxxx>
Acked-by: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Alistair Popple <apopple@xxxxxxxxxx>
Cc: Andrey Ryabinin <ryabinin.a.a@xxxxxxxxx>
Cc: Ard Biesheuvel <ardb@xxxxxxxxxx>
Cc: Barry Song <21cnbao@xxxxxxxxx>
Cc: Borislav Petkov (AMD) <bp@xxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: James Morse <james.morse@xxxxxxx>
Cc: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
Cc: Marc Zyngier <maz@xxxxxxxxxx>
Cc: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Cc: Yang Shi <shy828301@xxxxxxxxx>
Cc: Zi Yan <ziy@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 arch/arm64/include/asm/pgtable.h |   12 +++++++++---
 arch/arm64/kernel/efi.c          |    2 +-
 arch/arm64/mm/fault.c            |    4 ++--
 arch/arm64/mm/hugetlbpage.c     |    6 +++---
 arch/arm64/mm/kasan_init.c      |    2 +-
 arch/arm64/mm/mmu.c             |   12 ++++++------
 arch/arm64/mm/pageattr.c        |    4 ++--
 arch/arm64/mm/trans_pgd.c       |    2 +-
 8 files changed, 25 insertions(+), 19 deletions(-)

--- a/arch/arm64/include/asm/pgtable.h~arm64-mm-convert-read_onceptep-to-ptep_getptep
+++ a/arch/arm64/include/asm/pgtable.h
@@ -275,6 +275,12 @@ static inline void set_pte(pte_t *ptep,
 	}
 }
 
+#define ptep_get ptep_get
+static inline pte_t ptep_get(pte_t *ptep)
+{
+	return READ_ONCE(*ptep);
+}
+
 extern void __sync_icache_dcache(pte_t pteval);
 
 bool pgattr_change_is_safe(u64 old, u64 new);
@@ -302,7 +308,7 @@ static inline void __check_safe_pte_upda
 	if (!IS_ENABLED(CONFIG_DEBUG_VM))
 		return;
 
-	old_pte = READ_ONCE(*ptep);
+	old_pte = ptep_get(ptep);
 	if (!pte_valid(old_pte) || !pte_valid(pte))
 		return;
 
@@ -904,7 +910,7 @@ static inline int __ptep_test_and_clear_
 {
 	pte_t old_pte, pte;
 
-	pte = READ_ONCE(*ptep);
+	pte = ptep_get(ptep);
 	do {
 		old_pte = pte;
 		pte = pte_mkold(pte);
@@ -986,7 +992,7 @@ static inline void ptep_set_wrprotect(st
 {
 	pte_t old_pte, pte;
 
-	pte = READ_ONCE(*ptep);
+	pte = ptep_get(ptep);
 	do {
 		old_pte = pte;
 		pte = pte_wrprotect(pte);
--- a/arch/arm64/kernel/efi.c~arm64-mm-convert-read_onceptep-to-ptep_getptep
+++ a/arch/arm64/kernel/efi.c
@@ -103,7 +103,7 @@ static int __init set_permissions(pte_t
 {
 	struct set_perm_data *spd = data;
 	const efi_memory_desc_t *md = spd->md;
-	pte_t pte = READ_ONCE(*ptep);
+	pte_t pte = ptep_get(ptep);
 
 	if (md->attribute & EFI_MEMORY_RO)
 		pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
--- a/arch/arm64/mm/fault.c~arm64-mm-convert-read_onceptep-to-ptep_getptep
+++ a/arch/arm64/mm/fault.c
@@ -191,7 +191,7 @@ static void show_pte(unsigned long addr)
 		if (!ptep)
 			break;
 
-		pte = READ_ONCE(*ptep);
+		pte = ptep_get(ptep);
 		pr_cont(", pte=%016llx", pte_val(pte));
 		pte_unmap(ptep);
 	} while(0);
@@ -214,7 +214,7 @@ int ptep_set_access_flags(struct vm_area
 			  pte_t entry, int dirty)
 {
 	pteval_t old_pteval, pteval;
-	pte_t pte = READ_ONCE(*ptep);
+	pte_t pte = ptep_get(ptep);
 
 	if (pte_same(pte, entry))
 		return 0;
--- a/arch/arm64/mm/hugetlbpage.c~arm64-mm-convert-read_onceptep-to-ptep_getptep
+++ a/arch/arm64/mm/hugetlbpage.c
@@ -485,7 +485,7 @@ void huge_ptep_set_wrprotect(struct mm_s
 	size_t pgsize;
 	pte_t pte;
 
-	if (!pte_cont(READ_ONCE(*ptep))) {
+	if (!pte_cont(ptep_get(ptep))) {
 		ptep_set_wrprotect(mm, addr, ptep);
 		return;
 	}
@@ -510,7 +510,7 @@ pte_t huge_ptep_clear_flush(struct vm_ar
 	size_t pgsize;
 	int ncontig;
 
-	if (!pte_cont(READ_ONCE(*ptep)))
+	if (!pte_cont(ptep_get(ptep)))
 		return ptep_clear_flush(vma, addr, ptep);
 
 	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
@@ -543,7 +543,7 @@ pte_t huge_ptep_modify_prot_start(struct
 		 * when the permission changes from executable to non-executable
 		 * in cases where cpu is affected with errata #2645198.
 		 */
-		if (pte_user_exec(READ_ONCE(*ptep)))
+		if (pte_user_exec(ptep_get(ptep)))
 			return huge_ptep_clear_flush(vma, addr, ptep);
 	}
 	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
--- a/arch/arm64/mm/kasan_init.c~arm64-mm-convert-read_onceptep-to-ptep_getptep
+++ a/arch/arm64/mm/kasan_init.c
@@ -113,7 +113,7 @@ static void __init kasan_pte_populate(pm
 		memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
 		next = addr + PAGE_SIZE;
 		set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
-	} while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
+	} while (ptep++, addr = next, addr != end && pte_none(ptep_get(ptep)));
 }
 
 static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
--- a/arch/arm64/mm/mmu.c~arm64-mm-convert-read_onceptep-to-ptep_getptep
+++ a/arch/arm64/mm/mmu.c
@@ -173,7 +173,7 @@ static void init_pte(pmd_t *pmdp, unsign
 
 	ptep = pte_set_fixmap_offset(pmdp, addr);
 	do {
-		pte_t old_pte = READ_ONCE(*ptep);
+		pte_t old_pte = ptep_get(ptep);
 
 		set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));
 
@@ -182,7 +182,7 @@ static void init_pte(pmd_t *pmdp, unsign
 		 * only allow updates to the permission attributes.
 		 */
 		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
-					      READ_ONCE(pte_val(*ptep))));
+					      pte_val(ptep_get(ptep))));
 
 		phys += PAGE_SIZE;
 	} while (ptep++, addr += PAGE_SIZE, addr != end);
@@ -852,7 +852,7 @@ static void unmap_hotplug_pte_range(pmd_
 
 	do {
 		ptep = pte_offset_kernel(pmdp, addr);
-		pte = READ_ONCE(*ptep);
+		pte = ptep_get(ptep);
 		if (pte_none(pte))
 			continue;
 
@@ -985,7 +985,7 @@ static void free_empty_pte_table(pmd_t *
 
 	do {
 		ptep = pte_offset_kernel(pmdp, addr);
-		pte = READ_ONCE(*ptep);
+		pte = ptep_get(ptep);
 
 		/*
 		 * This is just a sanity check here which verifies that
@@ -1004,7 +1004,7 @@ static void free_empty_pte_table(pmd_t *
 	 */
 	ptep = pte_offset_kernel(pmdp, 0UL);
 	for (i = 0; i < PTRS_PER_PTE; i++) {
-		if (!pte_none(READ_ONCE(ptep[i])))
+		if (!pte_none(ptep_get(&ptep[i])))
 			return;
 	}
 
@@ -1473,7 +1473,7 @@ pte_t ptep_modify_prot_start(struct vm_a
 		 * when the permission changes from executable to non-executable
 		 * in cases where cpu is affected with errata #2645198.
 		 */
-		if (pte_user_exec(READ_ONCE(*ptep)))
+		if (pte_user_exec(ptep_get(ptep)))
 			return ptep_clear_flush(vma, addr, ptep);
 	}
 	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
--- a/arch/arm64/mm/pageattr.c~arm64-mm-convert-read_onceptep-to-ptep_getptep
+++ a/arch/arm64/mm/pageattr.c
@@ -36,7 +36,7 @@ bool can_set_direct_map(void)
 static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
 {
 	struct page_change_data *cdata = data;
-	pte_t pte = READ_ONCE(*ptep);
+	pte_t pte = ptep_get(ptep);
 
 	pte = clear_pte_bit(pte, cdata->clear_mask);
 	pte = set_pte_bit(pte, cdata->set_mask);
@@ -245,5 +245,5 @@ bool kernel_page_present(struct page *pa
 		return true;
 	ptep = pte_offset_kernel(pmdp, addr);
 
-	return pte_valid(READ_ONCE(*ptep));
+	return pte_valid(ptep_get(ptep));
 }
--- a/arch/arm64/mm/trans_pgd.c~arm64-mm-convert-read_onceptep-to-ptep_getptep
+++ a/arch/arm64/mm/trans_pgd.c
@@ -33,7 +33,7 @@ static void *trans_alloc(struct trans_pg
 
 static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
 {
-	pte_t pte = READ_ONCE(*src_ptep);
+	pte_t pte = ptep_get(src_ptep);
 
 	if (pte_valid(pte)) {
 		/*
_

Patches currently in -mm which might be from ryan.roberts@xxxxxxx are
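
An aside for readers following the series: the pattern the patch applies can
be modelled in plain C.  The sketch below is illustrative only and is not
part of the patch; READ_ONCE() is approximated with a volatile access (the
real kernel macro also does size/type checking), the pte_t stand-in is
simplified, and GCC/Clang is assumed for __typeof__.

#include <stdio.h>

/* Userspace approximation of the kernel's READ_ONCE(): a volatile
 * access, which stops the compiler from caching or tearing the load. */
#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

/* Simplified stand-in for the arm64 pte_t (a wrapped 64-bit value). */
typedef struct { unsigned long pte; } pte_t;
#define pte_val(p) ((p).pte)

/* The helper the patch adds: every pte read funnels through it, so a
 * later series (contpte) can change how a pte is read in one place
 * instead of at every call site, with identical generated code today. */
static inline pte_t ptep_get(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}

int main(void)
{
	pte_t entry = { 0x0060000040001fc3UL };	/* arbitrary example value */

	/* Old call-site style:  pte_t pte = READ_ONCE(*ptep);  */
	/* New call-site style after the conversion: */
	pte_t pte = ptep_get(&entry);

	printf("pte=%016lx\n", pte_val(pte));
	return 0;
}

The point of the indirection is visible in the sketch: call sites keep the
same semantics and codegen now, while the single definition of ptep_get()
becomes the one place to extend when reading a pte later involves more than
a single load.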