The quilt patch titled
     Subject: mm/treewide: replace pmd_large() with pmd_leaf()
has been removed from the -mm tree.  Its filename was
     mm-treewide-replace-pmd_large-with-pmd_leaf.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: Peter Xu <peterx@xxxxxxxxxx>
Subject: mm/treewide: replace pmd_large() with pmd_leaf()
Date: Tue, 5 Mar 2024 12:37:47 +0800

pmd_large() is always defined as pmd_leaf().  Merge their usages.  Chose
pmd_leaf() because pmd_leaf() is a global API, while pmd_large() is not.
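As an aside on why this substitution is purely mechanical: each
architecture's pmd_large() is by this point nothing more than a thin
wrapper around pmd_leaf().  The exact spelling (macro vs. inline
function) varies per architecture, but every definition amounts to
roughly the following sketch:

	/*
	 * Illustrative sketch only; the real per-arch definitions
	 * differ slightly in form, but all just forward to pmd_leaf().
	 */
	static inline int pmd_large(pmd_t pmd)
	{
		return pmd_leaf(pmd);
	}

so switching every caller from pmd_large() to pmd_leaf() cannot change
behaviour.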
Link: https://lkml.kernel.org/r/20240305043750.93762-8-peterx@xxxxxxxxxx
Signed-off-by: Peter Xu <peterx@xxxxxxxxxx>
Reviewed-by: Jason Gunthorpe <jgg@xxxxxxxxxx>
Reviewed-by: Mike Rapoport (IBM) <rppt@xxxxxxxxxx>
Cc: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: Andrey Konovalov <andreyknvl@xxxxxxxxx>
Cc: Andrey Ryabinin <ryabinin.a.a@xxxxxxxxx>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@xxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Christophe Leroy <christophe.leroy@xxxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Kirill A. Shutemov <kirill@xxxxxxxxxxxxx>
Cc: Michael Ellerman <mpe@xxxxxxxxxxxxxx>
Cc: Muchun Song <muchun.song@xxxxxxxxx>
Cc: "Naveen N. Rao" <naveen.n.rao@xxxxxxxxxxxxx>
Cc: Nicholas Piggin <npiggin@xxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Vincenzo Frascino <vincenzo.frascino@xxxxxxx>
Cc: Yang Shi <shy828301@xxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 arch/arm/mm/dump.c                       |    4 ++--
 arch/powerpc/mm/book3s64/pgtable.c       |    2 +-
 arch/powerpc/mm/book3s64/radix_pgtable.c |    2 +-
 arch/powerpc/mm/pgtable_64.c             |    2 +-
 arch/s390/boot/vmem.c                    |    2 +-
 arch/s390/include/asm/pgtable.h          |    8 ++++----
 arch/s390/mm/gmap.c                      |   12 ++++++------
 arch/s390/mm/hugetlbpage.c               |    2 +-
 arch/s390/mm/pageattr.c                  |    2 +-
 arch/s390/mm/pgtable.c                   |    6 +++---
 arch/s390/mm/vmem.c                      |    6 +++---
 arch/sparc/mm/init_64.c                  |    4 ++--
 arch/x86/boot/compressed/ident_map_64.c  |    2 +-
 arch/x86/kvm/mmu/mmu.c                   |    2 +-
 arch/x86/mm/fault.c                      |    8 ++++----
 arch/x86/mm/init_32.c                    |    2 +-
 arch/x86/mm/init_64.c                    |    8 ++++----
 arch/x86/mm/kasan_init_64.c              |    2 +-
 arch/x86/mm/mem_encrypt_identity.c       |    4 ++--
 arch/x86/mm/pat/set_memory.c             |    4 ++--
 arch/x86/mm/pgtable.c                    |    2 +-
 arch/x86/mm/pti.c                        |    4 ++--
 arch/x86/power/hibernate.c               |    2 +-
 arch/x86/xen/mmu_pv.c                    |    4 ++--
 drivers/misc/sgi-gru/grufault.c          |    2 +-
 25 files changed, 49 insertions(+), 49 deletions(-)

--- a/arch/arm/mm/dump.c~mm-treewide-replace-pmd_large-with-pmd_leaf
+++ a/arch/arm/mm/dump.c
@@ -349,12 +349,12 @@ static void walk_pmd(struct pg_state *st
 	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
 		addr = start + i * PMD_SIZE;
 		domain = get_domain_name(pmd);
-		if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd))
+		if (pmd_none(*pmd) || pmd_leaf(*pmd) || !pmd_present(*pmd))
 			note_page(st, addr, 4, pmd_val(*pmd), domain);
 		else
 			walk_pte(st, pmd, addr, domain);
 
-		if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1])) {
+		if (SECTION_SIZE < PMD_SIZE && pmd_leaf(pmd[1])) {
 			addr += SECTION_SIZE;
 			pmd++;
 			domain = get_domain_name(pmd);
--- a/arch/powerpc/mm/book3s64/pgtable.c~mm-treewide-replace-pmd_large-with-pmd_leaf
+++ a/arch/powerpc/mm/book3s64/pgtable.c
@@ -113,7 +113,7 @@ void set_pmd_at(struct mm_struct *mm, un
 	WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
 	assert_spin_locked(pmd_lockptr(mm, pmdp));
-	WARN_ON(!(pmd_large(pmd)));
+	WARN_ON(!(pmd_leaf(pmd)));
 #endif
 	trace_hugepage_set_pmd(addr, pmd_val(pmd));
 	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c~mm-treewide-replace-pmd_large-with-pmd_leaf
+++ a/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -924,7 +924,7 @@ bool vmemmap_can_optimize(struct vmem_al
 int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
 				unsigned long addr, unsigned long next)
 {
-	int large = pmd_large(*pmdp);
+	int large = pmd_leaf(*pmdp);
 
 	if (large)
 		vmemmap_verify(pmdp_ptep(pmdp), node, addr, next);
--- a/arch/powerpc/mm/pgtable_64.c~mm-treewide-replace-pmd_large-with-pmd_leaf
+++ a/arch/powerpc/mm/pgtable_64.c
@@ -132,7 +132,7 @@ struct page *pmd_page(pmd_t pmd)
 		 * enabled so these checks can't be used.
 		 */
 		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
-			VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
+			VM_WARN_ON(!(pmd_leaf(pmd) || pmd_huge(pmd)));
 		return pte_page(pmd_pte(pmd));
 	}
 	return virt_to_page(pmd_page_vaddr(pmd));
--- a/arch/s390/boot/vmem.c~mm-treewide-replace-pmd_large-with-pmd_leaf
+++ a/arch/s390/boot/vmem.c
@@ -333,7 +333,7 @@ static void pgtable_pmd_populate(pud_t *
 			}
 			pte = boot_pte_alloc();
 			pmd_populate(&init_mm, pmd, pte);
-		} else if (pmd_large(*pmd)) {
+		} else if (pmd_leaf(*pmd)) {
 			continue;
 		}
 		pgtable_pte_populate(pmd, addr, next, mode);
--- a/arch/s390/include/asm/pgtable.h~mm-treewide-replace-pmd_large-with-pmd_leaf
+++ a/arch/s390/include/asm/pgtable.h
@@ -721,7 +721,7 @@ static inline int pmd_large(pmd_t pmd)
 
 static inline int pmd_bad(pmd_t pmd)
 {
-	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
+	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_leaf(pmd))
 		return 1;
 	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
 }
@@ -820,8 +820,8 @@ static inline int pte_protnone(pte_t pte
 
 static inline int pmd_protnone(pmd_t pmd)
 {
-	/* pmd_large(pmd) implies pmd_present(pmd) */
-	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
+	/* pmd_leaf(pmd) implies pmd_present(pmd) */
+	return pmd_leaf(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
 }
 #endif
 
@@ -1385,7 +1385,7 @@ static inline unsigned long pmd_deref(pm
 	unsigned long origin_mask;
 
 	origin_mask = _SEGMENT_ENTRY_ORIGIN;
-	if (pmd_large(pmd))
+	if (pmd_leaf(pmd))
 		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
 	return (unsigned long)__va(pmd_val(pmd) & origin_mask);
 }
--- a/arch/s390/mm/gmap.c~mm-treewide-replace-pmd_large-with-pmd_leaf
+++ a/arch/s390/mm/gmap.c
@@ -603,7 +603,7 @@ int __gmap_link(struct gmap *gmap, unsig
 	pmd = pmd_offset(pud, vmaddr);
 	VM_BUG_ON(pmd_none(*pmd));
 	/* Are we allowed to use huge pages? */
-	if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
+	if (pmd_leaf(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
 		return -EFAULT;
 	/* Link gmap segment table entry location to page table. */
 	rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
@@ -615,7 +615,7 @@ int __gmap_link(struct gmap *gmap, unsig
 		rc = radix_tree_insert(&gmap->host_to_guest,
 				       vmaddr >> PMD_SHIFT, table);
 		if (!rc) {
-			if (pmd_large(*pmd)) {
+			if (pmd_leaf(*pmd)) {
 				*table = (pmd_val(*pmd) &
 					  _SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
 					| _SEGMENT_ENTRY_GMAP_UC;
@@ -945,7 +945,7 @@ static inline pmd_t *gmap_pmd_op_walk(st
 	}
 
 	/* 4k page table entries are locked via the pte (pte_alloc_map_lock). */
-	if (!pmd_large(*pmdp))
+	if (!pmd_leaf(*pmdp))
 		spin_unlock(&gmap->guest_table_lock);
 	return pmdp;
 }
@@ -957,7 +957,7 @@ static inline pmd_t *gmap_pmd_op_walk(st
  */
 static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
 {
-	if (pmd_large(*pmdp))
+	if (pmd_leaf(*pmdp))
 		spin_unlock(&gmap->guest_table_lock);
 }
 
@@ -1068,7 +1068,7 @@ static int gmap_protect_range(struct gma
 		rc = -EAGAIN;
 		pmdp = gmap_pmd_op_walk(gmap, gaddr);
 		if (pmdp) {
-			if (!pmd_large(*pmdp)) {
+			if (!pmd_leaf(*pmdp)) {
 				rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
 						      bits);
 				if (!rc) {
@@ -2500,7 +2500,7 @@ void gmap_sync_dirty_log_pmd(struct gmap
 	if (!pmdp)
 		return;
 
-	if (pmd_large(*pmdp)) {
+	if (pmd_leaf(*pmdp)) {
 		if (gmap_test_and_clear_dirty_pmd(gmap, pmdp, gaddr))
 			bitmap_fill(bitmap, _PAGE_ENTRIES);
 	} else {
--- a/arch/s390/mm/hugetlbpage.c~mm-treewide-replace-pmd_large-with-pmd_leaf
+++ a/arch/s390/mm/hugetlbpage.c
@@ -235,7 +235,7 @@ pte_t *huge_pte_offset(struct mm_struct 
 
 int pmd_huge(pmd_t pmd)
 {
-	return pmd_large(pmd);
+	return pmd_leaf(pmd);
 }
 
 int pud_huge(pud_t pud)
--- a/arch/s390/mm/pageattr.c~mm-treewide-replace-pmd_large-with-pmd_leaf
+++ a/arch/s390/mm/pageattr.c
@@ -185,7 +185,7 @@ static int walk_pmd_level(pud_t *pudp, u
 		if (pmd_none(*pmdp))
 			return -EINVAL;
 		next = pmd_addr_end(addr, end);
-		if (pmd_large(*pmdp)) {
+		if (pmd_leaf(*pmdp)) {
 			need_split  = !!(flags & SET_MEMORY_4K);
 			need_split |= !!(addr & ~PMD_MASK);
 			need_split |= !!(addr + PMD_SIZE > next);
--- a/arch/s390/mm/pgtable.c~mm-treewide-replace-pmd_large-with-pmd_leaf
+++ a/arch/s390/mm/pgtable.c
@@ -827,7 +827,7 @@ again:
 		return key ? -EFAULT : 0;
 	}
 
-	if (pmd_large(*pmdp)) {
+	if (pmd_leaf(*pmdp)) {
 		paddr = pmd_val(*pmdp) & HPAGE_MASK;
 		paddr |= addr & ~HPAGE_MASK;
 		/*
@@ -938,7 +938,7 @@ again:
 		return 0;
 	}
 
-	if (pmd_large(*pmdp)) {
+	if (pmd_leaf(*pmdp)) {
 		paddr = pmd_val(*pmdp) & HPAGE_MASK;
 		paddr |= addr & ~HPAGE_MASK;
 		cc = page_reset_referenced(paddr);
@@ -1002,7 +1002,7 @@ again:
 		return 0;
 	}
 
-	if (pmd_large(*pmdp)) {
+	if (pmd_leaf(*pmdp)) {
 		paddr = pmd_val(*pmdp) & HPAGE_MASK;
 		paddr |= addr & ~HPAGE_MASK;
 		*key = page_get_storage_key(paddr);
--- a/arch/s390/mm/vmem.c~mm-treewide-replace-pmd_large-with-pmd_leaf
+++ a/arch/s390/mm/vmem.c
@@ -236,7 +236,7 @@ static int __ref modify_pmd_table(pud_t 
 		if (!add) {
 			if (pmd_none(*pmd))
 				continue;
-			if (pmd_large(*pmd)) {
+			if (pmd_leaf(*pmd)) {
 				if (IS_ALIGNED(addr, PMD_SIZE) &&
 				    IS_ALIGNED(next, PMD_SIZE)) {
 					if (!direct)
@@ -281,7 +281,7 @@ static int __ref modify_pmd_table(pud_t 
 			if (!pte)
 				goto out;
 			pmd_populate(&init_mm, pmd, pte);
-		} else if (pmd_large(*pmd)) {
+		} else if (pmd_leaf(*pmd)) {
 			if (!direct)
 				vmemmap_use_sub_pmd(addr, next);
 			continue;
@@ -610,7 +610,7 @@ pte_t *vmem_get_alloc_pte(unsigned long 
 		if (!pte)
 			goto out;
 		pmd_populate(&init_mm, pmd, pte);
-	} else if (WARN_ON_ONCE(pmd_large(*pmd))) {
+	} else if (WARN_ON_ONCE(pmd_leaf(*pmd))) {
 		goto out;
 	}
 	ptep = pte_offset_kernel(pmd, addr);
--- a/arch/sparc/mm/init_64.c~mm-treewide-replace-pmd_large-with-pmd_leaf
+++ a/arch/sparc/mm/init_64.c
@@ -1672,7 +1672,7 @@ bool kern_addr_valid(unsigned long addr)
 	if (pmd_none(*pmd))
 		return false;
 
-	if (pmd_large(*pmd))
+	if (pmd_leaf(*pmd))
 		return pfn_valid(pmd_pfn(*pmd));
 
 	pte = pte_offset_kernel(pmd, addr);
@@ -2968,7 +2968,7 @@ void update_mmu_cache_pmd(struct vm_area
 	struct mm_struct *mm;
 	pmd_t entry = *pmd;
 
-	if (!pmd_large(entry) || !pmd_young(entry))
+	if (!pmd_leaf(entry) || !pmd_young(entry))
 		return;
 
 	pte = pmd_val(entry);
--- a/arch/x86/boot/compressed/ident_map_64.c~mm-treewide-replace-pmd_large-with-pmd_leaf
+++ a/arch/x86/boot/compressed/ident_map_64.c
@@ -284,7 +284,7 @@ static int set_clr_page_flags(struct x86
 	pudp = pud_offset(p4dp, address);
 	pmdp = pmd_offset(pudp, address);
 
-	if (pmd_large(*pmdp))
+	if (pmd_leaf(*pmdp))
 		ptep = split_large_pmd(info, pmdp, address);
 	else
 		ptep = pte_offset_kernel(pmdp, address);
--- a/arch/x86/kvm/mmu/mmu.c~mm-treewide-replace-pmd_large-with-pmd_leaf
+++ a/arch/x86/kvm/mmu/mmu.c
@@ -3135,7 +3135,7 @@ static int host_pfn_mapping_level(struct
 	if (pmd_none(pmd) || !pmd_present(pmd))
 		goto out;
 
-	if (pmd_large(pmd))
+	if (pmd_leaf(pmd))
 		level = PG_LEVEL_2M;
 
 out:
--- a/arch/x86/mm/fault.c~mm-treewide-replace-pmd_large-with-pmd_leaf
+++ a/arch/x86/mm/fault.c
@@ -250,7 +250,7 @@ static noinline int vmalloc_fault(unsign
 	if (!pmd_k)
 		return -1;
 
-	if (pmd_large(*pmd_k))
+	if (pmd_leaf(*pmd_k))
 		return 0;
 
 	pte_k = pte_offset_kernel(pmd_k, address);
@@ -319,7 +319,7 @@ static void dump_pagetable(unsigned long
 	 * And let's rather not kmap-atomic the pte, just in case
 	 * it's allocated already:
 	 */
-	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
+	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_leaf(*pmd))
 		goto out;
 
 	pte = pte_offset_kernel(pmd, address);
@@ -384,7 +384,7 @@ static void dump_pagetable(unsigned long
 		goto bad;
 
 	pr_cont("PMD %lx ", pmd_val(*pmd));
-	if (!pmd_present(*pmd) || pmd_large(*pmd))
+	if (!pmd_present(*pmd) || pmd_leaf(*pmd))
 		goto out;
 
 	pte = pte_offset_kernel(pmd, address);
@@ -1053,7 +1053,7 @@ spurious_kernel_fault(unsigned long erro
 	if (!pmd_present(*pmd))
 		return 0;
 
-	if (pmd_large(*pmd))
+	if (pmd_leaf(*pmd))
 		return spurious_kernel_fault_check(error_code, (pte_t *) pmd);
 
 	pte = pte_offset_kernel(pmd, address);
--- a/arch/x86/mm/init_32.c~mm-treewide-replace-pmd_large-with-pmd_leaf
+++ a/arch/x86/mm/init_32.c
@@ -463,7 +463,7 @@ void __init native_pagetable_init(void)
 			break;
 
 		/* should not be large page here */
-		if (pmd_large(*pmd)) {
+		if (pmd_leaf(*pmd)) {
 			pr_warn("try to clear pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx, but pmd is big page and is not using pte !\n",
 				pfn, pmd, __pa(pmd));
 			BUG_ON(1);
--- a/arch/x86/mm/init_64.c~mm-treewide-replace-pmd_large-with-pmd_leaf
+++ a/arch/x86/mm/init_64.c
@@ -530,7 +530,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned 
 		}
 
 		if (!pmd_none(*pmd)) {
-			if (!pmd_large(*pmd)) {
+			if (!pmd_leaf(*pmd)) {
 				spin_lock(&init_mm.page_table_lock);
 				pte = (pte_t *)pmd_page_vaddr(*pmd);
 				paddr_last = phys_pte_init(pte, paddr,
@@ -1114,7 +1114,7 @@ remove_pmd_table(pmd_t *pmd_start, unsig
 		if (!pmd_present(*pmd))
 			continue;
 
-		if (pmd_large(*pmd)) {
+		if (pmd_leaf(*pmd)) {
 			if (IS_ALIGNED(addr, PMD_SIZE) &&
 			    IS_ALIGNED(next, PMD_SIZE)) {
 				if (!direct)
@@ -1520,9 +1520,9 @@ void __meminit vmemmap_set_pmd(pmd_t *pm
 int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
 				unsigned long addr, unsigned long next)
 {
-	int large = pmd_large(*pmd);
+	int large = pmd_leaf(*pmd);
 
-	if (pmd_large(*pmd)) {
+	if (pmd_leaf(*pmd)) {
 		vmemmap_verify((pte_t *)pmd, node, addr, next);
 		vmemmap_use_sub_pmd(addr, next);
 	}
--- a/arch/x86/mm/kasan_init_64.c~mm-treewide-replace-pmd_large-with-pmd_leaf
+++ a/arch/x86/mm/kasan_init_64.c
@@ -95,7 +95,7 @@ static void __init kasan_populate_pud(pu
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
-		if (!pmd_large(*pmd))
+		if (!pmd_leaf(*pmd))
 			kasan_populate_pmd(pmd, addr, next, nid);
 	} while (pmd++, addr = next, addr != end);
 }
--- a/arch/x86/mm/mem_encrypt_identity.c~mm-treewide-replace-pmd_large-with-pmd_leaf
+++ a/arch/x86/mm/mem_encrypt_identity.c
@@ -161,7 +161,7 @@ static void __init sme_populate_pgd_larg
 		return;
 
 	pmd = pmd_offset(pud, ppd->vaddr);
-	if (pmd_large(*pmd))
+	if (pmd_leaf(*pmd))
 		return;
 
 	set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
@@ -185,7 +185,7 @@ static void __init sme_populate_pgd(stru
 		set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
 	}
 
-	if (pmd_large(*pmd))
+	if (pmd_leaf(*pmd))
 		return;
 
 	pte = pte_offset_kernel(pmd, ppd->vaddr);
--- a/arch/x86/mm/pat/set_memory.c~mm-treewide-replace-pmd_large-with-pmd_leaf
+++ a/arch/x86/mm/pat/set_memory.c
@@ -692,7 +692,7 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd,
 		return NULL;
 
 	*level = PG_LEVEL_2M;
-	if (pmd_large(*pmd) || !pmd_present(*pmd))
+	if (pmd_leaf(*pmd) || !pmd_present(*pmd))
 		return (pte_t *)pmd;
 
 	*level = PG_LEVEL_4K;
@@ -1229,7 +1229,7 @@ static void unmap_pmd_range(pud_t *pud, 
 	 * Try to unmap in 2M chunks.
 	 */
 	while (end - start >= PMD_SIZE) {
-		if (pmd_large(*pmd))
+		if (pmd_leaf(*pmd))
 			pmd_clear(pmd);
 		else
 			__unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);
--- a/arch/x86/mm/pgtable.c~mm-treewide-replace-pmd_large-with-pmd_leaf
+++ a/arch/x86/mm/pgtable.c
@@ -792,7 +792,7 @@ int pud_clear_huge(pud_t *pud)
  */
 int pmd_clear_huge(pmd_t *pmd)
 {
-	if (pmd_large(*pmd)) {
+	if (pmd_leaf(*pmd)) {
 		pmd_clear(pmd);
 		return 1;
 	}
--- a/arch/x86/mm/pti.c~mm-treewide-replace-pmd_large-with-pmd_leaf
+++ a/arch/x86/mm/pti.c
@@ -252,7 +252,7 @@ static pte_t *pti_user_pagetable_walk_pt
 		return NULL;
 
 	/* We can't do anything sensible if we hit a large mapping. */
-	if (pmd_large(*pmd)) {
+	if (pmd_leaf(*pmd)) {
 		WARN_ON(1);
 		return NULL;
 	}
@@ -341,7 +341,7 @@ pti_clone_pgtable(unsigned long start, u
 			continue;
 		}
 
-		if (pmd_large(*pmd) || level == PTI_CLONE_PMD) {
+		if (pmd_leaf(*pmd) || level == PTI_CLONE_PMD) {
 			target_pmd = pti_user_pagetable_walk_pmd(addr);
 			if (WARN_ON(!target_pmd))
 				return;
--- a/arch/x86/power/hibernate.c~mm-treewide-replace-pmd_large-with-pmd_leaf
+++ a/arch/x86/power/hibernate.c
@@ -175,7 +175,7 @@ int relocate_restore_code(void)
 		goto out;
 	}
 	pmd = pmd_offset(pud, relocated_restore_code);
-	if (pmd_large(*pmd)) {
+	if (pmd_leaf(*pmd)) {
 		set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
 		goto out;
 	}
--- a/arch/x86/xen/mmu_pv.c~mm-treewide-replace-pmd_large-with-pmd_leaf
+++ a/arch/x86/xen/mmu_pv.c
@@ -1059,7 +1059,7 @@ static void __init xen_cleanmfnmap_pmd(p
 	pte_t *pte_tbl;
 	int i;
 
-	if (pmd_large(*pmd)) {
+	if (pmd_leaf(*pmd)) {
 		pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
 		xen_free_ro_pages(pa, PMD_SIZE);
 		return;
@@ -1871,7 +1871,7 @@ static phys_addr_t __init xen_early_virt
 	if (!pmd_present(pmd))
 		return 0;
 	pa = pmd_val(pmd) & PTE_PFN_MASK;
-	if (pmd_large(pmd))
+	if (pmd_leaf(pmd))
 		return pa + (vaddr & ~PMD_MASK);
 
 	pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
--- a/drivers/misc/sgi-gru/grufault.c~mm-treewide-replace-pmd_large-with-pmd_leaf
+++ a/drivers/misc/sgi-gru/grufault.c
@@ -227,7 +227,7 @@ static int atomic_pte_lookup(struct vm_a
 	if (unlikely(pmd_none(*pmdp)))
 		goto err;
 #ifdef CONFIG_X86_64
-	if (unlikely(pmd_large(*pmdp)))
+	if (unlikely(pmd_leaf(*pmdp)))
 		pte = ptep_get((pte_t *)pmdp);
 	else
 #endif
_

Patches currently in -mm which might be from peterx@xxxxxxxxxx are