vma_mmu_pagesize() is only required for slices; otherwise there is a
generic weak version doing the exact same thing.

Move it to slice.c

Signed-off-by: Christophe Leroy <christophe.leroy@xxxxxxxxxx>
Reviewed-by: Nicholas Piggin <npiggin@xxxxxxxxx>
---
 arch/powerpc/mm/hugetlbpage.c | 11 -----------
 arch/powerpc/mm/slice.c       |  9 +++++++++
 2 files changed, 9 insertions(+), 11 deletions(-)

diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index b642a5a8668f..7b89f0799d82 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -565,17 +565,6 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 }
 #endif
 
-unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
-{
-	/* With radix we don't use slice, so derive it from vma*/
-	if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled()) {
-		unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);
-
-		return 1UL << mmu_psize_to_shift(psize);
-	}
-	return vma_kernel_pagesize(vma);
-}
-
 bool __init arch_hugetlb_valid_size(unsigned long size)
 {
 	int shift = __ffs(size);
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index f42711f865f3..8a3ac062b71e 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -759,4 +759,13 @@ int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
 
 	return !slice_check_range_fits(mm, maskp, addr, len);
 }
+
+unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
+{
+	/* With radix we don't use slice, so derive it from vma */
+	if (radix_enabled())
+		return vma_kernel_pagesize(vma);
+
+	return 1UL << mmu_psize_to_shift(get_slice_psize(vma->vm_mm, vma->vm_start));
+}
 #endif
-- 
2.35.1
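
For reference, the generic weak version mentioned in the commit message
lives in mm/hugetlb.c. The snippet below is a sketch paraphrased from
memory rather than quoted from the tree, but it shows the shape of the
fallback used by architectures that don't provide a strong override:

	/*
	 * Generic fallback: assume the MMU page size backing a VMA is
	 * simply the kernel page size of that VMA (base or huge page).
	 */
	__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
	{
		return vma_kernel_pagesize(vma);
	}

On radix, the moved copy in slice.c still returns vma_kernel_pagesize(),
exactly as the removed hugetlbpage.c version did, so the change is
intended to be purely a code move with no behavioural difference.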