From: Zhaoyang Huang <zhaoyang.huang@xxxxxxxxxx>

When rodata_full is enabled on arm64, the linear map is created at page
granularity and PTE_CONT is never set. Introduce arch_alloc_page() and
arch_free_page() hooks that set and clear PTE_CONT on page blocks which
are large enough and naturally aligned to cover whole contiguous-PTE
ranges.

Signed-off-by: Zhaoyang Huang <zhaoyang.huang@xxxxxxxxxx>
---
 arch/arm64/include/asm/page.h |  5 +++++
 arch/arm64/mm/pageattr.c      | 42 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 47 insertions(+)

diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index f98c91b..53cdd09 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -46,6 +46,11 @@ struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
 
 #include <asm/memory.h>
 
+#define HAVE_ARCH_ALLOC_PAGE
+#define HAVE_ARCH_FREE_PAGE
+
+extern void arch_alloc_page(struct page *page, int order);
+extern void arch_free_page(struct page *page, int order);
 #endif /* !__ASSEMBLY__ */
 
 #define VM_DATA_DEFAULT_FLAGS	(VM_DATA_FLAGS_TSK_EXEC | VM_MTE_ALLOWED)
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index a3bacd7..815a06d 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -239,3 +239,45 @@ bool kernel_page_present(struct page *page)
 	ptep = pte_offset_kernel(pmdp, addr);
 	return pte_valid(READ_ONCE(*ptep));
 }
+
+void arch_alloc_page(struct page *page, int order)
+{
+	unsigned long addr = (unsigned long)page_address(page);
+	int i, nr_cont;
+
+	if (!rodata_full)
+		return;
+
+	/* The block must span >= CONT_PTES pages and be naturally aligned. */
+	if (order < CONT_PTE_SHIFT - PAGE_SHIFT || (addr & ~CONT_PTE_MASK))
+		return;
+
+	/* Set PTE_CONT on each contiguous-PTE range within the block. */
+	nr_cont = 1 << (order - (CONT_PTE_SHIFT - PAGE_SHIFT));
+	for (i = 0; i < nr_cont; i++) {
+		__change_memory_common(addr, CONT_PTE_SIZE,
+				       __pgprot(PTE_CONT), __pgprot(0));
+		addr += CONT_PTE_SIZE;
+	}
+}
+
+void arch_free_page(struct page *page, int order)
+{
+	unsigned long addr = (unsigned long)page_address(page);
+	int i, nr_cont;
+
+	if (!rodata_full)
+		return;
+
+	/* Only blocks that arch_alloc_page() could have marked qualify. */
+	if (order < CONT_PTE_SHIFT - PAGE_SHIFT || (addr & ~CONT_PTE_MASK))
+		return;
+
+	/* Clear PTE_CONT from each contiguous-PTE range within the block. */
+	nr_cont = 1 << (order - (CONT_PTE_SHIFT - PAGE_SHIFT));
+	for (i = 0; i < nr_cont; i++) {
+		__change_memory_common(addr, CONT_PTE_SIZE,
+				       __pgprot(0), __pgprot(PTE_CONT));
+		addr += CONT_PTE_SIZE;
+	}
+}
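
For illustration, a minimal userspace sketch of the eligibility and
iteration logic shared by both hooks, assuming a 4K granule (CONT_PTES =
16, so CONT_PTE_SIZE is 64K and the minimum eligible order is 4). The
kernel macro names are redefined locally and __change_memory_common() is
stubbed out with a printf(); this models the check only, not the real
page-table update:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define CONT_PTES	16
#define CONT_PTE_SIZE	(CONT_PTES * PAGE_SIZE)	/* 64K */
#define CONT_PTE_MASK	(~(CONT_PTE_SIZE - 1))

static void apply_cont(unsigned long addr, int order)
{
	int i, nr_cont;

	/* Block must span >= CONT_PTES pages and start on a 64K boundary. */
	if (order < 4 || (addr & ~CONT_PTE_MASK))
		return;

	/* An order-N block holds 2^(N - 4) contiguous-PTE ranges. */
	nr_cont = 1 << (order - 4);
	for (i = 0; i < nr_cont; i++) {
		printf("mark [%#lx, %#lx) PTE_CONT\n",
		       addr, addr + CONT_PTE_SIZE);
		addr += CONT_PTE_SIZE;
	}
}

int main(void)
{
	apply_cont(0x40000000UL, 6);	/* aligned order-6 block: 4 ranges */
	apply_cont(0x40001000UL, 6);	/* misaligned: left as plain pages */
	return 0;
}

The alignment check is not optional: the architecture only allows the
contiguous hint when all CONT_PTES entries map one naturally aligned
CONT_PTE_SIZE region with identical attributes, so a misaligned or
undersized block must be left as plain pages.

--
1.9.1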