Currently the kernel_map_linear_page() function assumes it is working on the linear_map_hash_slots array. However, later patches need a separate linear map array for kfence, so make kernel_map_linear_page() and kernel_unmap_linear_page() take the linear map array and lock as function arguments. This is needed to separate out kfence from the debug_pagealloc infrastructure.
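
For illustration, a later kfence user could then supply its own slots array and lock to the same helper. The kfence-side names in the sketch below are hypothetical placeholders and not part of this patch:

	/* Hypothetical kfence-side state, allocated elsewhere; illustration only */
	static u8 *kfence_linear_map_slots;
	static DEFINE_RAW_SPINLOCK(kfence_linear_map_lock);

	static void kfence_map_linear_page(unsigned long vaddr, unsigned long idx)
	{
		/* Same helper as debug_pagealloc, different backing array and lock */
		kernel_map_linear_page(vaddr, idx, kfence_linear_map_slots,
				       &kfence_linear_map_lock);
	}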

Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@xxxxxxxxx>
---
 arch/powerpc/mm/book3s64/hash_utils.c | 47 ++++++++++++++-------------
 1 file changed, 25 insertions(+), 22 deletions(-)

diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index ab50bb33a390..11975a2f7403 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -272,11 +272,8 @@ void hash__tlbiel_all(unsigned int action)
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
-static u8 *linear_map_hash_slots;
-static unsigned long linear_map_hash_count;
-static DEFINE_RAW_SPINLOCK(linear_map_hash_lock);
-
-static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
+static void kernel_map_linear_page(unsigned long vaddr, unsigned long idx,
+				   u8 *slots, raw_spinlock_t *lock)
 {
 	unsigned long hash;
 	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
@@ -290,7 +287,7 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 	if (!vsid)
 		return;
 
-	if (linear_map_hash_slots[lmi] & 0x80)
+	if (slots[idx] & 0x80)
 		return;
 
 	ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode,
@@ -298,36 +295,40 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 				    mmu_linear_psize, mmu_kernel_ssize);
 	BUG_ON (ret < 0);
 
-	raw_spin_lock(&linear_map_hash_lock);
-	BUG_ON(linear_map_hash_slots[lmi] & 0x80);
-	linear_map_hash_slots[lmi] = ret | 0x80;
-	raw_spin_unlock(&linear_map_hash_lock);
+	raw_spin_lock(lock);
+	BUG_ON(slots[idx] & 0x80);
+	slots[idx] = ret | 0x80;
+	raw_spin_unlock(lock);
 }
 
-static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
+static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long idx,
+				     u8 *slots, raw_spinlock_t *lock)
 {
-	unsigned long hash, hidx, slot;
+	unsigned long hash, hslot, slot;
 	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
 	unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
 
 	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
-	raw_spin_lock(&linear_map_hash_lock);
-	if (!(linear_map_hash_slots[lmi] & 0x80)) {
-		raw_spin_unlock(&linear_map_hash_lock);
+	raw_spin_lock(lock);
+	if (!(slots[idx] & 0x80)) {
+		raw_spin_unlock(lock);
 		return;
 	}
-	hidx = linear_map_hash_slots[lmi] & 0x7f;
-	linear_map_hash_slots[lmi] = 0;
-	raw_spin_unlock(&linear_map_hash_lock);
-	if (hidx & _PTEIDX_SECONDARY)
+	hslot = slots[idx] & 0x7f;
+	slots[idx] = 0;
+	raw_spin_unlock(lock);
+	if (hslot & _PTEIDX_SECONDARY)
 		hash = ~hash;
 	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
-	slot += hidx & _PTEIDX_GROUP_IX;
+	slot += hslot & _PTEIDX_GROUP_IX;
 	mmu_hash_ops.hpte_invalidate(slot, vpn, mmu_linear_psize,
 				     mmu_linear_psize,
 				     mmu_kernel_ssize, 0);
 }
 
+static u8 *linear_map_hash_slots;
+static unsigned long linear_map_hash_count;
+static DEFINE_RAW_SPINLOCK(linear_map_hash_lock);
 static inline void hash_debug_pagealloc_alloc_slots(void)
 {
 	if (!debug_pagealloc_enabled())
@@ -362,9 +363,11 @@ static int hash_debug_pagealloc_map_pages(struct page *page, int numpages,
 		if (lmi >= linear_map_hash_count)
 			continue;
 		if (enable)
-			kernel_map_linear_page(vaddr, lmi);
+			kernel_map_linear_page(vaddr, lmi,
+					       linear_map_hash_slots, &linear_map_hash_lock);
 		else
-			kernel_unmap_linear_page(vaddr, lmi);
+			kernel_unmap_linear_page(vaddr, lmi,
+					       linear_map_hash_slots, &linear_map_hash_lock);
 	}
 	local_irq_restore(flags);
 	return 0;
-- 
2.46.0