Currently, page table setup is done during setup_vm_final where fixmap can be used to create the temporary mappings. The physical frame is allocated from memblock_alloc_* functions. However, this won't work if a page table mapping needs to be created for a different mm context (i.e. efi mm) at a later point in time. Use generic kernel page allocation functions & macros for any mapping after setup_vm_final. Signed-off-by: Atish Patra <atish.patra@xxxxxxx> --- arch/riscv/mm/init.c | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 62c90161765f..9f3129c76985 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -209,6 +209,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss; pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss; static bool mmu_enabled; +static bool late_mapping; #define MAX_EARLY_MAPPING_SIZE SZ_128M @@ -233,7 +234,9 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) static pte_t *__init get_pte_virt(phys_addr_t pa) { - if (mmu_enabled) { + if (late_mapping) + return (pte_t *) __va(pa); + else if (mmu_enabled) { clear_fixmap(FIX_PTE); return (pte_t *)set_fixmap_offset(FIX_PTE, pa); } else { @@ -243,13 +246,19 @@ static pte_t *__init get_pte_virt(phys_addr_t pa) static phys_addr_t __init alloc_pte(uintptr_t va) { + unsigned long vaddr; /* * We only create PMD or PGD early mappings so we * should never reach here with MMU disabled. 
*/ BUG_ON(!mmu_enabled); - - return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); + if (late_mapping) { + vaddr = __get_free_page(GFP_KERNEL); + if (!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr))) + BUG(); + return __pa(vaddr); + } else + return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); } static void __init create_pte_mapping(pte_t *ptep, @@ -278,7 +287,9 @@ pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE); static pmd_t *__init get_pmd_virt(phys_addr_t pa) { - if (mmu_enabled) { + if (late_mapping) + return (pmd_t *) __va(pa); + else if (mmu_enabled) { clear_fixmap(FIX_PMD); return (pmd_t *)set_fixmap_offset(FIX_PMD, pa); } else { @@ -289,8 +300,13 @@ static pmd_t *__init get_pmd_virt(phys_addr_t pa) static phys_addr_t __init alloc_pmd(uintptr_t va) { uintptr_t pmd_num; + unsigned long vaddr; - if (mmu_enabled) + if (late_mapping) { + vaddr = __get_free_page(GFP_KERNEL); + BUG_ON(!vaddr); + return __pa(vaddr); + } else if (mmu_enabled) return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT; @@ -528,6 +544,9 @@ static void __init setup_vm_final(void) /* Move to swapper page table */ csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE); local_flush_tlb_all(); + + /* generic page allocation function must be used to setup page table */ + late_mapping = true; } #else void *dtb_early_va __initdata; -- 2.24.0