Hi Azriel,

Thanks for working on 4-level 4K page support.

On 05/01/2016:05:12:39 PM, Azriel Samson wrote:
> Add PUD translation for 4 level page tables.
> 
> Signed-off-by: Mansi Patel <mansip at codeaurora.org>
> Signed-off-by: Azriel Samson <asamson at codeaurora.org>
> ---
> This change targets the arm64_support branch of Pratyush Anand's repository at:
> https://github.com/pratyushanand/makedumpfile.git
> 
>  arch/arm64.c | 25 ++++++++++++++++++-------
>  1 file changed, 18 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/arm64.c b/arch/arm64.c
> index 59825fa..62c76e3 100644
> --- a/arch/arm64.c
> +++ b/arch/arm64.c
> @@ -35,15 +35,10 @@ typedef struct {
>  	pud_t pud;
>  } pmd_t;
> 
> -#define pud_offset(pgd, vaddr) ((pud_t *)pgd)
> -
>  #define pgd_val(x) ((x).pgd)
>  #define pud_val(x) (pgd_val((x).pgd))
>  #define pmd_val(x) (pud_val((x).pud))
> 
> -#define PUD_SHIFT PGDIR_SHIFT
> -#define PUD_SIZE (1UL << PUD_SHIFT)
> -
>  typedef struct {
>  	unsigned long pte;
>  } pte_t;
> @@ -59,6 +54,10 @@ typedef struct {
>  #define PMD_SIZE (1UL << PMD_SHIFT)
>  #define PMD_MASK (~(PMD_SIZE - 1))
>  #define PTRS_PER_PMD PTRS_PER_PTE
> +#define PUD_SHIFT ((PAGE_SHIFT - 3) * 3 + 3)

This will break systems with pgtable_level <= 3. I think it would be
better to define it as follows:

static int get_pud_shift_arm64(void)
{
	if (pgtable_level == 4)
		return ((PAGE_SHIFT - 3) * 3 + 3);
	else
		return PGDIR_SHIFT;
}

#define PUD_SHIFT get_pud_shift_arm64()

> +#define PUD_SIZE (1UL << PUD_SHIFT)
> +#define PUD_MASK (~(PUD_SIZE - 1))
> +#define PTRS_PER_PUD PTRS_PER_PTE
> 
>  #define PAGE_PRESENT (1 << 0)
>  #define SECTIONS_SIZE_BITS 30
> @@ -95,6 +94,10 @@ typedef struct {
>  #define pud_page_vaddr(pud) (__va(pud_val(pud) & PHYS_MASK & (int32_t)PAGE_MASK))
>  #define pmd_offset_pgtbl_lvl_3(pud, vaddr) ((pmd_t *)pud_page_vaddr((*pud)) + pmd_index(vaddr))
> 

Since the above definition of PTRS_PER_PUD is true only for 4-level page
tables, it would be better to keep that define here.

> +#define pud_index(vaddr) (((vaddr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
> +#define pgd_page_vaddr(pgd) (__va(pgd_val(pgd) & PHYS_MASK & (int32_t)PAGE_MASK))
> +#define pud_offset(pgd, vaddr) ((pud_t *)pgd_page_vaddr((*pgd)) + pud_index(vaddr))
> +

Maybe the following would be better:

static pud_t *
pud_offset(pgd_t *pgd, unsigned long vaddr)
{
	if (pgtable_level == 4)
		return ((pud_t *)pgd_page_vaddr((*pgd)) + pud_index(vaddr));
	else
		return (pud_t *)pgd;
}

>  /* kernel struct page size can be kernel version dependent, currently
>   * keep it constant.
>   */
> @@ -244,7 +247,7 @@ vtop_arm64(unsigned long vaddr)
>  {
>  	unsigned long long paddr = NOT_PADDR;
>  	pgd_t *pgda, pgdv;
> -	pud_t pudv;
> +	pud_t *puda, pudv;
>  	pmd_t *pmda, pmdv;
>  	pte_t *ptea, ptev;
> 
> @@ -259,7 +262,15 @@ vtop_arm64(unsigned long vaddr)
>  		return NOT_PADDR;
>  	}
> 
> -	pudv.pgd = pgdv;
> +	if (pgtable_level <= 3) {
> +		pudv.pgd = pgdv;
> +	} else {

With the suggested pud_offset() definition, we can get rid of the above
"if" condition, and the pud calculation would then be in sync with the
other calculations, like those for pgd and pmd.

> +		puda = pud_offset(&pgdv, vaddr);
> +		if (!readmem(VADDR, (unsigned long long)puda, &pudv, sizeof(pudv))) {
> +			ERRMSG("Can't read pud\n");
> +			return NOT_PADDR;
> +		}
> +	}
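
To make the level dependence concrete, here is a small standalone sketch
(not part of the patch or of makedumpfile; it only illustrates the
per-level shift arithmetic the arm64 kernel uses, (PAGE_SHIFT - 3) *
(4 - level) + 3) showing how PGDIR_SHIFT and PUD_SHIFT relate for 4K
pages. With 4 levels the PUD covers bit 30 and up, while with 3 or fewer
levels the PUD folds into the PGD, so PUD_SHIFT must fall back to
PGDIR_SHIFT:

#include <stdio.h>

/* Shift handled by the table at level n (0 = PGD ... 3 = PTE). */
static unsigned long level_shift(unsigned int page_shift, unsigned int n)
{
	return (unsigned long)(page_shift - 3) * (4 - n) + 3;
}

int main(void)
{
	const unsigned int page_shift = 12;	/* 4K pages */

	for (unsigned int levels = 2; levels <= 4; levels++) {
		/* The top-level table is at level (4 - levels). */
		unsigned long pgdir_shift = level_shift(page_shift, 4 - levels);
		/* A real PUD (level 1) exists only with 4 levels;
		 * otherwise it is folded into the PGD. */
		unsigned long pud_shift = (levels == 4) ?
			level_shift(page_shift, 1) : pgdir_shift;

		printf("levels=%u: PGDIR_SHIFT=%lu PUD_SHIFT=%lu\n",
		       levels, pgdir_shift, pud_shift);
	}
	return 0;
}

This prints 21/21, 30/30 and 39/30 for 2, 3 and 4 levels, which is why
hard-coding PUD_SHIFT to 30 is only correct when pgtable_level == 4.

~Pratyush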