In order to be able to flag the PMD entry with _PMD_HUGE_8M on powerpc 8xx,
provide page size to pte_alloc_huge() and use it through the newly
introduced pte_alloc_size().

Signed-off-by: Christophe Leroy <christophe.leroy@xxxxxxxxxx>
---
 arch/arm64/mm/hugetlbpage.c   | 2 +-
 arch/parisc/mm/hugetlbpage.c  | 2 +-
 arch/powerpc/mm/hugetlbpage.c | 2 +-
 arch/riscv/mm/hugetlbpage.c   | 2 +-
 arch/sh/mm/hugetlbpage.c      | 2 +-
 arch/sparc/mm/hugetlbpage.c   | 2 +-
 include/linux/hugetlb.h       | 4 ++--
 7 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 0f0e10bb0a95..71161c655fd6 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -289,7 +289,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 			return NULL;
 
 		WARN_ON(addr & (sz - 1));
-		ptep = pte_alloc_huge(mm, pmdp, addr);
+		ptep = pte_alloc_huge(mm, pmdp, addr, sz);
 	} else if (sz == PMD_SIZE) {
 		if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp)))
 			ptep = huge_pmd_share(mm, vma, addr, pudp);
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
index a9f7e21f6656..2f4c6b440710 100644
--- a/arch/parisc/mm/hugetlbpage.c
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -66,7 +66,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (pud) {
 		pmd = pmd_alloc(mm, pud, addr);
 		if (pmd)
-			pte = pte_alloc_huge(mm, pmd, addr);
+			pte = pte_alloc_huge(mm, pmd, addr, sz);
 	}
 	return pte;
 }
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 594a4b7b2ca2..66ac56b26007 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -183,7 +183,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 		return NULL;
 
 	if (IS_ENABLED(CONFIG_PPC_8xx) && pshift < PMD_SHIFT)
-		return pte_alloc_huge(mm, (pmd_t *)hpdp, addr);
+		return pte_alloc_huge(mm, (pmd_t *)hpdp, addr, sz);
 
 	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));
 
diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
index 5ef2a6891158..dc77a58c6321 100644
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -67,7 +67,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 
 	for_each_napot_order(order) {
 		if (napot_cont_size(order) == sz) {
-			pte = pte_alloc_huge(mm, pmd, addr & napot_cont_mask(order));
+			pte = pte_alloc_huge(mm, pmd, addr & napot_cont_mask(order), sz);
 			break;
 		}
 	}
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index 6cb0ad73dbb9..26579429e5ed 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -38,7 +38,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 			if (pud) {
 				pmd = pmd_alloc(mm, pud, addr);
 				if (pmd)
-					pte = pte_alloc_huge(mm, pmd, addr);
+					pte = pte_alloc_huge(mm, pmd, addr, sz);
 			}
 		}
 	}
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index b432500c13a5..5a342199e837 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -298,7 +298,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 		return NULL;
 	if (sz >= PMD_SIZE)
 		return (pte_t *)pmd;
-	return pte_alloc_huge(mm, pmd, addr);
+	return pte_alloc_huge(mm, pmd, addr, sz);
 }
 
 pte_t *huge_pte_offset(struct mm_struct *mm,
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 77b30a8c6076..d9c5d9daadc5 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -193,9 +193,9 @@ static inline pte_t *pte_offset_huge(pmd_t *pmd, unsigned long address)
 	return pte_offset_kernel(pmd, address);
 }
 static inline pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd,
-				    unsigned long address)
+				    unsigned long address, unsigned long sz)
 {
-	return pte_alloc(mm, pmd) ? NULL : pte_offset_huge(pmd, address);
+	return pte_alloc_size(mm, pmd, sz) ? NULL : pte_offset_huge(pmd, address);
 }
 #endif
-- 
2.43.0
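
For readers who do not have the earlier patch of this series at hand, a minimal sketch of how a size-aware pte_alloc_size() could sit next to pte_alloc() is shown below. This is only an illustration of the idea (the page size is forwarded so an architecture such as powerpc 8xx can flag the PMD entry); it is not the actual helper introduced by the series, and the three-argument __pte_alloc() is assumed here.

/*
 * Illustrative sketch only -- not the definition from this series.
 * Assumes __pte_alloc() has been taught to take the page size so the
 * architecture can mark the PMD entry (e.g. with _PMD_HUGE_8M on 8xx).
 */
#define pte_alloc_size(mm, pmd, sz)				\
	(unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, sz))

/* Regular mappings keep today's behaviour by passing PAGE_SIZE. */
#define pte_alloc(mm, pmd)	pte_alloc_size(mm, pmd, PAGE_SIZE)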