Hi Matthew,

On Tue, Apr 28, 2020 at 12:44:46PM -0700, Matthew Wilcox wrote:
> From: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx>
>
> An upcoming patch will pass mm_struct to the page table constructor.
> Make sure arm64 has the appropriate mm_struct at the point it needs to
> call the constructor.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>

This generally looks good to me. I was a little scared that we'd need
to update the EFI mapping code, but I see that already passes its mm
into create_pgd_mapping(), and everything else uses init_mm today.

One small comment below.

> ---
>  arch/arm64/mm/mmu.c | 89 ++++++++++++++++++++++-----------------------
>  1 file changed, 43 insertions(+), 46 deletions(-)
>
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index a374e4f51a62..69ecc83c3be0 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -88,7 +88,9 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
>  }
>  EXPORT_SYMBOL(phys_mem_access_prot);
>
> -static phys_addr_t __init early_pgtable_alloc(int shift)
> +typedef phys_addr_t (arm_pt_alloc_t)(int size, struct mm_struct *);

Sorry to bikeshed, but for consistency with the naming scheme used here
could we please call this 'pgtable_alloc_fn'? We generally use
'pgtable' here, and 'fn' makes it clearer that this is a function
pointer rather than data. The 'arm_' prefix is also a bit unusual here,
and I don't think we need it. (There's a rough sketch of what I have in
mind at the end of this mail.)

With that:

Reviewed-by: Mark Rutland <mark.rutland@xxxxxxx>

Mark.

> @@ -333,11 +332,9 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
>  	pud_clear_fixmap();
>  }
>
> -static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
> -				 unsigned long virt, phys_addr_t size,
> -				 pgprot_t prot,
> -				 phys_addr_t (*pgtable_alloc)(int),
> -				 int flags)
> +static void __create_pgd_mapping(struct mm_struct *mm, pgd_t *pgdir,
> +		phys_addr_t phys, unsigned long virt, phys_addr_t size,
> +		pgprot_t prot, arm_pt_alloc_t pgtable_alloc, int flags)
>  {
>  	unsigned long addr, end, next;
>  	pgd_t *pgdp = pgd_offset_raw(pgdir, virt);
> @@ -355,13 +352,13 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
>
>  	do {
>  		next = pgd_addr_end(addr, end);
> -		alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
> +		alloc_init_pud(mm, pgdp, addr, next, phys, prot, pgtable_alloc,
>  			       flags);
>  		phys += next - addr;
>  	} while (pgdp++, addr = next, addr != end);
>  }
>
> -static phys_addr_t __pgd_pgtable_alloc(int shift)
> +static phys_addr_t __pgd_pgtable_alloc(int shift, struct mm_struct *mm)
>  {
>  	void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
>  	BUG_ON(!ptr);
> @@ -371,9 +368,9 @@ static phys_addr_t __pgd_pgtable_alloc(int shift)
>  	return __pa(ptr);
>  }
>
> -static phys_addr_t pgd_pgtable_alloc(int shift)
> +static phys_addr_t pgd_pgtable_alloc(int shift, struct mm_struct *mm)
>  {
> -	phys_addr_t pa = __pgd_pgtable_alloc(shift);
> +	phys_addr_t pa = __pgd_pgtable_alloc(shift, mm);
>
>  	/*
>  	 * Call proper page table ctor in case later we need to
> @@ -404,8 +401,8 @@ static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
>  			&phys, virt);
>  		return;
>  	}
> -	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
> -			     NO_CONT_MAPPINGS);
> +	__create_pgd_mapping(&init_mm, init_mm.pgd, phys, virt, size, prot,
> +			     NULL, NO_CONT_MAPPINGS);
>  }
>
>  void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
> @@ -419,7 +416,7 @@ void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
>  	if (page_mappings_only)
>  		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
>
> -	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
> +	__create_pgd_mapping(mm, mm->pgd, phys, virt, size, prot,
>  			     pgd_pgtable_alloc, flags);
>  }
>
> @@ -432,8 +429,8 @@ static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
>  		return;
>  	}
>
> -	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
> -			     NO_CONT_MAPPINGS);
> +	__create_pgd_mapping(&init_mm, init_mm.pgd, phys, virt, size, prot,
> +			     NULL, NO_CONT_MAPPINGS);
>
>  	/* flush the TLBs after updating live kernel mappings */
>  	flush_tlb_kernel_range(virt, virt + size);
> @@ -442,8 +439,8 @@ static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
>  static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
>  				  phys_addr_t end, pgprot_t prot, int flags)
>  {
> -	__create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
> -			     prot, early_pgtable_alloc, flags);
> +	__create_pgd_mapping(&init_mm, pgdp, start, __phys_to_virt(start),
> +			     end - start, prot, early_pgtable_alloc, flags);
>  }
>
>  void __init mark_linear_text_alias_ro(void)
> @@ -547,8 +544,8 @@ static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
>  	BUG_ON(!PAGE_ALIGNED(pa_start));
>  	BUG_ON(!PAGE_ALIGNED(size));
>
> -	__create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
> -			     early_pgtable_alloc, flags);
> +	__create_pgd_mapping(&init_mm, pgdp, pa_start, (unsigned long)va_start,
> +			     size, prot, early_pgtable_alloc, flags);
>
>  	if (!(vm_flags & VM_NO_GUARD))
>  		size += PAGE_SIZE;
> @@ -591,8 +588,8 @@ static int __init map_entry_trampoline(void)
>
>  	/* Map only the text into the trampoline page table */
>  	memset(tramp_pg_dir, 0, PGD_SIZE);
> -	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
> -			     prot, __pgd_pgtable_alloc, 0);
> +	__create_pgd_mapping(&init_mm, tramp_pg_dir, pa_start, TRAMP_VALIAS,
> +			     PAGE_SIZE, prot, __pgd_pgtable_alloc, 0);
>
>  	/* Map both the text and data into the kernel page table */
>  	__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
> @@ -1381,9 +1378,9 @@ int arch_add_memory(int nid, u64 start, u64 size,
>  	if (rodata_full || debug_pagealloc_enabled())
>  		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
>
> -	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
> -			     size, params->pgprot, __pgd_pgtable_alloc,
> -			     flags);
> +	__create_pgd_mapping(&init_mm, swapper_pg_dir, start,
> +			     __phys_to_virt(start), size, params->pgprot,
> +			     __pgd_pgtable_alloc, flags);
>
>  	memblock_clear_nomap(start, size);
>
> -- 
> 2.26.2
>
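
To make the naming suggestion above concrete, here is a rough, untested
sketch of what I have in mind. 'pgtable_alloc_fn' is only my proposed
spelling; the signatures are otherwise taken verbatim from the quoted
hunks, nothing else would need to change:

	/*
	 * Suggested rename of the typedef added by the patch; not what
	 * the patch currently uses (arm_pt_alloc_t).
	 */
	typedef phys_addr_t (pgtable_alloc_fn)(int size, struct mm_struct *);

	/* Callers then take the typedef rather than an open-coded pointer: */
	static void __create_pgd_mapping(struct mm_struct *mm, pgd_t *pgdir,
			phys_addr_t phys, unsigned long virt, phys_addr_t size,
			pgprot_t prot, pgtable_alloc_fn pgtable_alloc, int flags);

That keeps the 'pgtable' naming used elsewhere in this file, and the
'fn' suffix makes the function-pointer nature obvious at the call sites.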