Implement pud_free_pmd_page() and pmd_free_pte_page(). Implementation requires, 1) Freeing of the un-used next level page tables 2) Clearing off the current pud/pmd entry 3) Invalidate TLB which could have previously valid but not stale entry Note that the table page's virtual address must be derived from the descriptor's physical address (pud_page_paddr/pmd_page_paddr), not from the raw descriptor value, which carries type/attribute bits in its low bits. The entry is cleared and the TLB invalidated *before* the old table page is freed, per the architecture's break-before-make requirement, so a concurrent hardware walker can never load from a freed page. Signed-off-by: Chintan Pandya <cpandya@xxxxxxxxxxxxxx> --- arch/arm64/mm/mmu.c | 38 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index da98828..c70f139 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -45,6 +45,7 @@ #include <asm/memblock.h> #include <asm/mmu_context.h> #include <asm/ptdump.h> +#include <asm/tlbflush.h> #define NO_BLOCK_MAPPINGS BIT(0) #define NO_CONT_MAPPINGS BIT(1) @@ -975,10 +976,43 @@ int pmd_clear_huge(pmd_t *pmdp)

 int pud_free_pmd_page(pud_t *pud, unsigned long addr)
 {
-	return pud_none(*pud);
+	pmd_t *pmd;
+	int i;
+
+	if (pud_none(*pud))
+		return 1;
+
+	/* Mask off the descriptor attribute bits to get the table VA. */
+	pmd = (pmd_t *)__va(pud_page_paddr(*pud));
+	for (i = 0; i < PTRS_PER_PMD; i++)
+		pmd_free_pte_page(&pmd[i], addr + (i * PMD_SIZE));
+
+	/*
+	 * Clear the entry and invalidate the TLB before freeing the
+	 * table, so a concurrent walker cannot load from a freed page
+	 * (break-before-make).
+	 */
+	pud_clear(pud);
+	flush_tlb_kernel_range(addr, addr + PUD_SIZE);
+	free_page((unsigned long)pmd);
+	return 1;
 }

 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 {
-	return pmd_none(*pmd);
+	unsigned long pte;
+
+	if (pmd_none(*pmd))
+		return 1;
+
+	/* Mask off the descriptor attribute bits to get the table VA. */
+	pte = (unsigned long)__va(pmd_page_paddr(*pmd));
+	pmd_clear(pmd);
+	/*
+	 * FIXME: __flush_tlb_pgtable(&init_mm, addr) would be the
+	 * exact op for intermediate page tables, but it is broken
+	 * (evident from tests), so use a safe TLB op until fixed.
+	 */
+	flush_tlb_kernel_range(addr, addr + PMD_SIZE);
+	free_page(pte);
+	return 1;
 }
-- Qualcomm India Private Limited, on behalf of Qualcomm Innovation Center, Inc., is a member of Code Aurora Forum, a Linux Foundation Collaborative Project