Now that all PTE manipulations for user space page tables go via the
fixmap, we can remap these tables read-only in the linear region so
they cannot be corrupted inadvertently.

Signed-off-by: Ard Biesheuvel <ardb@xxxxxxxxxx>
---
 arch/arm64/include/asm/pgalloc.h |  5 +++++
 arch/arm64/include/asm/tlb.h     |  2 ++
 arch/arm64/mm/mmu.c              | 23 ++++++++++++++++++++
 3 files changed, 30 insertions(+)

diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 63f9ae9e96fe..18a5bb0c9ee4 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -18,10 +18,15 @@
 #define __HAVE_ARCH_PUD_FREE
 #define __HAVE_ARCH_PMD_ALLOC_ONE
 #define __HAVE_ARCH_PMD_FREE
+#define __HAVE_ARCH_PTE_ALLOC_ONE
+#define __HAVE_ARCH_PTE_FREE
 
 #include <asm-generic/pgalloc.h>
 
 #define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
 
+pgtable_t pte_alloc_one(struct mm_struct *mm);
+void pte_free(struct mm_struct *mm, struct page *pte_page);
+
 #if CONFIG_PGTABLE_LEVELS > 2
 pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr);
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 0f54fbb59bba..e69a44160cce 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -75,6 +75,8 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				  unsigned long addr)
 {
+	if (page_tables_are_ro())
+		set_pgtable_rw(page_address(pte));
 	pgtable_pte_page_dtor(pte);
 	tlb_remove_table(tlb, pte);
 }
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e55d91a5f1ed..949846654797 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1686,3 +1686,26 @@ void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 	free_page((u64)pmd);
 }
 #endif
+
+pgtable_t pte_alloc_one(struct mm_struct *mm)
+{
+	pgtable_t pgt = __pte_alloc_one(mm, GFP_PGTABLE_USER);
+
+	VM_BUG_ON(mm == &init_mm);
+
+	if (!pgt)
+		return NULL;
+	if (page_tables_are_ro())
+		set_pgtable_ro(page_address(pgt));
+	return pgt;
+}
+
+void pte_free(struct mm_struct *mm, struct page *pte_page)
+{
+	VM_BUG_ON(mm == &init_mm);
+
+	if (page_tables_are_ro())
+		set_pgtable_rw(page_address(pte_page));
+	pgtable_pte_page_dtor(pte_page);
+	__free_page(pte_page);
+}
--
2.30.2
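
The page_tables_are_ro(), set_pgtable_ro() and set_pgtable_rw() helpers
used above are introduced by an earlier patch in this series and are not
shown here. Below is a minimal sketch of what such helpers could look
like; it is illustrative only, not the series' actual implementation. It
assumes the linear map is mapped down to base pages (e.g. with
rodata=full), so a single page's permissions can be changed without
splitting block mappings. The names set_page_prot_cb and remap_pgtable,
and the Kconfig symbol, are invented for this sketch; the kernel APIs
used (apply_to_existing_page_range(), flush_tlb_kernel_range()) are
real.

/*
 * Illustrative sketch only, not taken from this series. Assumes the
 * linear map uses base pages so permissions can be flipped per page.
 */
#include <linux/mm.h>
#include <asm/tlbflush.h>

/* Hypothetical gate; the Kconfig symbol is invented for this sketch */
bool page_tables_are_ro(void)
{
	return IS_ENABLED(CONFIG_ARM64_RO_PAGE_TABLES);
}

/* Callback invoked for each existing PTE in the given range */
static int set_page_prot_cb(pte_t *ptep, unsigned long addr, void *data)
{
	pgprot_t prot = *(pgprot_t *)data;

	/* Rewrite the linear map PTE with the requested permissions */
	set_pte(ptep, pfn_pte(pte_pfn(READ_ONCE(*ptep)), prot));
	return 0;
}

static void remap_pgtable(void *addr, pgprot_t prot)
{
	unsigned long va = (unsigned long)addr;

	/* Walk only existing entries; the linear map is fully populated */
	apply_to_existing_page_range(&init_mm, va, PAGE_SIZE,
				     set_page_prot_cb, &prot);
	flush_tlb_kernel_range(va, va + PAGE_SIZE);
}

void set_pgtable_ro(void *addr)
{
	remap_pgtable(addr, PAGE_KERNEL_RO);
}

void set_pgtable_rw(void *addr)
{
	remap_pgtable(addr, PAGE_KERNEL);
}

Note how the patch itself uses these helpers: both pte_free() and
__pte_free_tlb() flip the page back to writable before it is returned to
the page allocator, so a freed page table page can safely be reused for
arbitrary data.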