Hi, Enze, On Wed, Jul 19, 2023 at 4:34 PM Enze Li <lienze@xxxxxxxxxx> wrote: > > According to LoongArch documentation online, there are two types of address > translation modes: direct mapped address translation mode (direct mapped mode) > and page table mapped address translation mode (page table mapped mode). > > Currently, the upstream code only supports DMM (Direct Mapped Mode). > This patch adds a function that determines whether PTMM (Page Table > Mapped Mode) should be used, and also adds the corresponding handler > functions for both modes. > > For more details on the two modes, see [1]. > > [1] https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#virtual-address-space-and-address-translation-mode > > Signed-off-by: Enze Li <lienze@xxxxxxxxxx> > --- > arch/loongarch/include/asm/page.h | 10 ++++++++++ > arch/loongarch/include/asm/pgtable.h | 6 ++++++ > arch/loongarch/mm/pgtable.c | 25 +++++++++++++++++++++++++ > 3 files changed, 41 insertions(+) > > diff --git a/arch/loongarch/include/asm/page.h b/arch/loongarch/include/asm/page.h > index 26e8dccb6619..05919be15801 100644 > --- a/arch/loongarch/include/asm/page.h > +++ b/arch/loongarch/include/asm/page.h > @@ -84,7 +84,17 @@ typedef struct { unsigned long pgprot; } pgprot_t; > #define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x)) > > #define virt_to_pfn(kaddr) PFN_DOWN(PHYSADDR(kaddr)) > + > +#ifdef CONFIG_64BIT > +#define virt_to_page(kaddr) \ > +({ \ > + is_PTMM_addr((unsigned long)kaddr) ? \ > + PTMM_virt_to_page((unsigned long)kaddr) : \ > + DMM_virt_to_page((unsigned long)kaddr); \ > +}) 1. Renaming these helpers to is_dmw_addr()/dmw_virt_to_page()/tlb_virt_to_page() would be better. 2. These helpers are so simple that they can be defined as inline functions or macros in page.h. 3. CONFIG_64BIT can be removed here. 
Huacai > +#else > #define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) > +#endif > > extern int __virt_addr_valid(volatile void *kaddr); > #define virt_addr_valid(kaddr) __virt_addr_valid((volatile void *)(kaddr)) > diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h > index ed6a37bb55b5..0fc074b8bd48 100644 > --- a/arch/loongarch/include/asm/pgtable.h > +++ b/arch/loongarch/include/asm/pgtable.h > @@ -360,6 +360,12 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt > #define PMD_T_LOG2 (__builtin_ffs(sizeof(pmd_t)) - 1) > #define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1) > > +#ifdef CONFIG_64BIT > +struct page *DMM_virt_to_page(unsigned long kaddr); > +struct page *PTMM_virt_to_page(unsigned long kaddr); > +bool is_PTMM_addr(unsigned long kaddr); > +#endif > + > extern pgd_t swapper_pg_dir[]; > extern pgd_t invalid_pg_dir[]; > > diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c > index 36a6dc0148ae..4c6448f996b6 100644 > --- a/arch/loongarch/mm/pgtable.c > +++ b/arch/loongarch/mm/pgtable.c > @@ -9,6 +9,31 @@ > #include <asm/pgtable.h> > #include <asm/tlbflush.h> > > +#ifdef CONFIG_64BIT > +/* DMM stands for Direct Mapped Mode. */ > +struct page *DMM_virt_to_page(unsigned long kaddr) > +{ > + return pfn_to_page(virt_to_pfn(kaddr)); > +} > +EXPORT_SYMBOL_GPL(DMM_virt_to_page); > + > +/* PTMM stands for Page Table Mapped Mode. */ > +struct page *PTMM_virt_to_page(unsigned long kaddr) > +{ > + return pte_page(*virt_to_kpte(kaddr)); > +} > +EXPORT_SYMBOL_GPL(PTMM_virt_to_page); > + > +bool is_PTMM_addr(unsigned long kaddr) > +{ > + if (unlikely((kaddr & GENMASK(BITS_PER_LONG - 1, cpu_vabits)) == > + GENMASK(BITS_PER_LONG - 1, cpu_vabits))) > + return true; > + return false; > +} > +EXPORT_SYMBOL_GPL(is_PTMM_addr); > +#endif > + > pgd_t *pgd_alloc(struct mm_struct *mm) > { > pgd_t *ret, *init; > -- > 2.34.1 > >