Refactor hash__kernel_map_pages() to call a new static helper,
hash_debug_pagealloc_map_pages(). This will be useful when kfence
support is added later. No functional change in this patch.

Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@xxxxxxxxx>
---
 arch/powerpc/mm/book3s64/hash_utils.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 0b63acf62d1d..ab50bb33a390 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -349,7 +349,8 @@ static inline void hash_debug_pagealloc_add_slot(phys_addr_t paddr, int slot)
 	linear_map_hash_slots[paddr >> PAGE_SHIFT] = slot | 0x80;
 }
 
-int hash__kernel_map_pages(struct page *page, int numpages, int enable)
+static int hash_debug_pagealloc_map_pages(struct page *page, int numpages,
+					  int enable)
 {
 	unsigned long flags, vaddr, lmi;
 	int i;
@@ -368,6 +369,12 @@ int hash__kernel_map_pages(struct page *page, int numpages, int enable)
 	local_irq_restore(flags);
 	return 0;
 }
+
+int hash__kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	return hash_debug_pagealloc_map_pages(page, numpages, enable);
+}
+
 #else /* CONFIG_DEBUG_PAGEALLOC */
 
 int hash__kernel_map_pages(struct page *page, int numpages, int enable)
-- 
2.46.0
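
[Editor's note: for context on why the wrapper helps, a minimal sketch of the kind of
follow-up this refactor enables is shown below. Once the debug_pagealloc logic lives in a
static helper, the common hash__kernel_map_pages() entry point can later dispatch kfence
pool addresses to a kfence-specific path. The hash_kfence_map_pages() helper named here is
a hypothetical placeholder, not part of this patch; is_kfence_address() and page_address()
are existing kernel APIs from <linux/kfence.h> and <linux/mm.h>.]

	/*
	 * Hypothetical follow-up (illustration only, not part of this patch):
	 * route kfence pool pages to a dedicated mapper, and everything else
	 * to the debug_pagealloc helper factored out above.
	 */
	int hash__kernel_map_pages(struct page *page, int numpages, int enable)
	{
		void *vaddr = page_address(page);

		if (is_kfence_address(vaddr))
			/* assumed helper to be added by a later kfence patch */
			return hash_kfence_map_pages(page, numpages, enable);

		return hash_debug_pagealloc_map_pages(page, numpages, enable);
	}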