Implement pud_free_pmd_page() and pmd_free_pte_page().

The implementation requires:

 1) Freeing the unused next-level page tables
 2) Clearing the current pud/pmd entry
 3) Invalidating any TLB entry that may still cache the previously
    valid, now stale, table entry

Signed-off-by: Chintan Pandya <cpandya@xxxxxxxxxxxxxx>
---
V4->V5:
 - Use __flush_tlb_kernel_pgtable() instead of flush_tlb_kernel_range()

 arch/arm64/mm/mmu.c | 33 +++++++++++++++++++++++++++++++--
 1 file changed, 31 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index da98828..3552c7a 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -45,6 +45,7 @@
 #include <asm/memblock.h>
 #include <asm/mmu_context.h>
 #include <asm/ptdump.h>
+#include <asm/tlbflush.h>
 
 #define NO_BLOCK_MAPPINGS	BIT(0)
 #define NO_CONT_MAPPINGS	BIT(1)
@@ -973,12 +974,40 @@ int pmd_clear_huge(pmd_t *pmdp)
 	return 1;
 }
 
+static int __pmd_free_pte_page(pmd_t *pmd, unsigned long addr, bool tlb_inv)
+{
+	pmd_t *table;
+
+	if (pmd_val(*pmd)) {
+		table = __va(pmd_val(*pmd));
+		pmd_clear(pmd);
+		if (tlb_inv)
+			__flush_tlb_kernel_pgtable(addr);
+
+		free_page((unsigned long) table);
+	}
+	return 1;
+}
+
 int pud_free_pmd_page(pud_t *pud, unsigned long addr)
 {
-	return pud_none(*pud);
+	pmd_t *table;
+	int i;
+
+	if (pud_val(*pud)) {
+		table = __va(pud_val(*pud));
+		for (i = 0; i < PTRS_PER_PMD; i++)
+			__pmd_free_pte_page(&table[i], addr + (i * PMD_SIZE),
+					    false);
+
+		pud_clear(pud);
+		flush_tlb_kernel_range(addr, addr + PUD_SIZE);
+		free_page((unsigned long) table);
+	}
+	return 1;
 }
 
 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 {
-	return pmd_none(*pmd);
+	return __pmd_free_pte_page(pmd, addr, true);
 }
--
Qualcomm India Private Limited, on behalf of Qualcomm Innovation Center, Inc.,
is a member of Code Aurora Forum, a Linux Foundation Collaborative Project
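
For context, these hooks are consumed by the generic ioremap code, which calls
them before installing a huge (block) mapping at the pud/pmd level so that any
page table left behind by an earlier mapping is torn down first. Below is a
minimal, illustrative sketch of that calling pattern; example_map_pmd_block()
is a hypothetical name, the real caller lives in lib/ioremap.c and differs in
detail, and none of this is part of the patch itself.

/*
 * Illustrative only: simplified caller in the spirit of the generic
 * ioremap PMD huge-mapping path. Assumes kernel context where
 * pmd_set_huge() and pmd_free_pte_page() are available.
 */
static int example_map_pmd_block(pmd_t *pmdp, unsigned long addr,
				 phys_addr_t phys, pgprot_t prot)
{
	/*
	 * Before a PMD-level block mapping is written, any PTE page the
	 * old entry pointed to must be released: pmd_free_pte_page()
	 * clears the entry, invalidates the stale TLB table-walk entry,
	 * and frees the page, as implemented in this patch.
	 */
	if (!pmd_free_pte_page(pmdp, addr))
		return 0;	/* old page table could not be freed */

	return pmd_set_huge(pmdp, phys, prot);
}

pud_free_pmd_page() follows the same pattern one level up: it walks all
PTRS_PER_PMD entries of the PMD table, frees each PTE page without a per-entry
TLB flush, then clears the PUD and flushes the whole PUD_SIZE range once.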