walk_page_range() is going to be allowed to walk page tables other than
those of user space. For this it needs to know when it has reached a
'leaf' entry in the page tables. This information is provided by the
p?d_large() functions/macros.

For um, we don't support large pages, so add stubs returning 0.

CC: Jeff Dike <jdike@xxxxxxxxxxx>
CC: Richard Weinberger <richard@xxxxxx>
CC: Anton Ivanov <anton.ivanov@xxxxxxxxxxxxxxxxxx>
CC: linux-um@xxxxxxxxxxxxxxxxxxx
Signed-off-by: Steven Price <steven.price@xxxxxxx>
---
 arch/um/include/asm/pgtable-3level.h | 1 +
 arch/um/include/asm/pgtable.h        | 1 +
 2 files changed, 2 insertions(+)

diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
index c4d876dfb9ac..2abf9aa5808e 100644
--- a/arch/um/include/asm/pgtable-3level.h
+++ b/arch/um/include/asm/pgtable-3level.h
@@ -57,6 +57,7 @@
 #define pud_none(x)	(!(pud_val(x) & ~_PAGE_NEWPAGE))
 #define pud_bad(x)	((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
 #define pud_present(x)	(pud_val(x) & _PAGE_PRESENT)
+#define pud_large(x)	(0)
 #define pud_populate(mm, pud, pmd) \
 	set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))

diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index 9c04562310b3..d5fa4e118dcc 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -100,6 +100,7 @@ extern unsigned long end_iomem;
 #define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
 #define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
+#define pmd_large(x)	(0)
 #define pmd_clear(xp)	do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
 #define pmd_newpage(x)	(pmd_val(x) & _PAGE_NEWPAGE)
-- 
2.20.1
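
For context, a minimal sketch of how a generic walker might use these
predicates to spot leaf entries; walk_one_pud(), handle_leaf() and
walk_pmd_range() are hypothetical names, not walk_page_range() itself:

	/*
	 * Sketch only, not part of this patch: descend one PUD entry,
	 * treating a large-page mapping as a leaf. On um, pud_large()
	 * is always 0, so the walker always descends into the PMDs.
	 */
	static int walk_one_pud(pud_t *pud, unsigned long addr,
				unsigned long end)
	{
		if (pud_none(*pud))
			return 0;	/* nothing mapped here */

		if (pud_large(*pud))	/* leaf entry: stop descending */
			return handle_leaf(pud_val(*pud), addr, end);

		/* table entry: walk the PMD level below this PUD */
		return walk_pmd_range(pud, addr, end);
	}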