Implement pud_free_pmd_page() and pmd_free_pte_page().

The implementation requires:

 1) Freeing the unused next-level page tables
 2) Clearing the current pud/pmd entry
 3) Invalidating the TLB, which could still hold a previously valid
    but now stale entry

Signed-off-by: Chintan Pandya <cpandya@xxxxxxxxxxxxxx>
---
 arch/arm64/mm/mmu.c | 40 ++++++++++++++++++++++++++++++++++++++--
 1 file changed, 38 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index da98828..7be3106 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -45,6 +45,7 @@
 #include <asm/memblock.h>
 #include <asm/mmu_context.h>
 #include <asm/ptdump.h>
+#include <asm/tlbflush.h>
 
 #define NO_BLOCK_MAPPINGS	BIT(0)
 #define NO_CONT_MAPPINGS	BIT(1)
@@ -973,12 +974,47 @@ int pmd_clear_huge(pmd_t *pmdp)
 	return 1;
 }
 
+static int __pmd_free_pte_page(pmd_t *pmd, unsigned long addr, bool tlb_inv)
+{
+	pmd_t *table;
+
+	if (pmd_val(*pmd)) {
+		table = __va(pmd_val(*pmd));
+		pmd_clear(pmd);
+		/*
+		 * FIXME: __flush_tlb_pgtable(&init_mm, addr) is the
+		 * ideal candidate here, which exactly flushes
+		 * intermediate pgtables. But it is broken (evident
+		 * from tests), so use a safe TLB op until that is
+		 * fixed.
+		 */
+		if (tlb_inv)
+			flush_tlb_kernel_range(addr, addr + PMD_SIZE);
+
+		free_page((unsigned long) table);
+	}
+	return 1;
+}
+
 int pud_free_pmd_page(pud_t *pud, unsigned long addr)
 {
-	return pud_none(*pud);
+	pmd_t *table;
+	int i;
+
+	if (pud_val(*pud)) {
+		table = __va(pud_val(*pud));
+		for (i = 0; i < PTRS_PER_PMD; i++)
+			__pmd_free_pte_page(&table[i], addr + (i * PMD_SIZE),
+					    false);
+
+		pud_clear(pud);
+		flush_tlb_kernel_range(addr, addr + PUD_SIZE);
+		free_page((unsigned long) table);
+	}
+	return 1;
 }
 
 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 {
-	return pmd_none(*pmd);
+	return __pmd_free_pte_page(pmd, addr, true);
 }
-- 
Qualcomm India Private Limited, on behalf of Qualcomm Innovation Center,
Inc., is a member of Code Aurora Forum, a Linux Foundation Collaborative
Project
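
For reviewers, a minimal sketch of the intended caller pattern (modeled on
the lib/ioremap.c huge-mapping path; ioremap_try_huge_pmd() is an
illustrative name here, not something this patch adds):

	static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
					unsigned long end,
					phys_addr_t phys_addr, pgprot_t prot)
	{
		/* A block mapping needs a full, aligned PMD-sized region */
		if ((end - addr) != PMD_SIZE ||
		    !IS_ALIGNED(phys_addr, PMD_SIZE))
			return 0;

		/*
		 * pmd_free_pte_page() (this patch) clears the PMD entry,
		 * flushes the TLB and frees any stale PTE-level table, so
		 * returning 1 means a huge mapping can safely overwrite
		 * the entry.
		 */
		if (!pmd_free_pte_page(pmd, addr))
			return 0;

		return pmd_set_huge(pmd, phys_addr, prot);
	}

Because pmd_free_pte_page() returns 1 only after any old table mapping has
been torn down and invalidated, the caller never installs a block mapping
on top of a live PTE table, which is the stale-TLB-entry hazard the commit
message describes.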