The patch titled
     Subject: set_memory: allow set_direct_map_*_noflush() for multiple pages
has been added to the -mm tree.  Its filename is
     set_memory-allow-set_direct_map__noflush-for-multiple-pages.patch

This patch should soon appear at
    https://ozlabs.org/~akpm/mmots/broken-out/set_memory-allow-set_direct_map__noflush-for-multiple-pages.patch
and later at
    https://ozlabs.org/~akpm/mmotm/broken-out/set_memory-allow-set_direct_map__noflush-for-multiple-pages.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Mike Rapoport <rppt@xxxxxxxxxxxxx>
Subject: set_memory: allow set_direct_map_*_noflush() for multiple pages

The underlying implementations of set_direct_map_invalid_noflush() and
set_direct_map_default_noflush() allow updating multiple contiguous pages
at once.

Add a numpages parameter to set_direct_map_*_noflush() to expose this
ability with these APIs.

Link: https://lkml.kernel.org/r/20210303162209.8609-5-rppt@xxxxxxxxxx
Signed-off-by: Mike Rapoport <rppt@xxxxxxxxxxxxx>
Acked-by: Catalin Marinas <catalin.marinas@xxxxxxx>	[arm64]
Cc: Alexander Viro <viro@xxxxxxxxxxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Arnd Bergmann <arnd@xxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Christopher Lameter <cl@xxxxxxxxx>
Cc: Dan Williams <dan.j.williams@xxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: Elena Reshetova <elena.reshetova@xxxxxxxxx>
Cc: Hagen Paul Pfeifer <hagen@xxxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: James Bottomley <jejb@xxxxxxxxxxxxx>
Cc: "Kirill A. Shutemov" <kirill@xxxxxxxxxxxxx>
Shutemov" <kirill@xxxxxxxxxxxxx> Cc: Mark Rutland <mark.rutland@xxxxxxx> Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx> Cc: Michael Kerrisk <mtk.manpages@xxxxxxxxx> Cc: Palmer Dabbelt <palmer@xxxxxxxxxxx> Cc: Palmer Dabbelt <palmerdabbelt@xxxxxxxxxx> Cc: Paul Walmsley <paul.walmsley@xxxxxxxxxx> Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx> Cc: Rick Edgecombe <rick.p.edgecombe@xxxxxxxxx> Cc: Roman Gushchin <guro@xxxxxx> Cc: Shakeel Butt <shakeelb@xxxxxxxxxx> Cc: Shuah Khan <shuah@xxxxxxxxxx> Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx> Cc: Tycho Andersen <tycho@xxxxxxxx> Cc: Will Deacon <will@xxxxxxxxxx> Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx> --- arch/arm64/include/asm/cacheflush.h | 4 ++-- arch/arm64/mm/pageattr.c | 10 ++++++---- arch/riscv/include/asm/set_memory.h | 4 ++-- arch/riscv/mm/pageattr.c | 8 ++++---- arch/x86/include/asm/set_memory.h | 4 ++-- arch/x86/mm/pat/set_memory.c | 8 ++++---- include/linux/set_memory.h | 4 ++-- kernel/power/snapshot.c | 4 ++-- mm/vmalloc.c | 5 +++-- 9 files changed, 27 insertions(+), 24 deletions(-) --- a/arch/arm64/include/asm/cacheflush.h~set_memory-allow-set_direct_map__noflush-for-multiple-pages +++ a/arch/arm64/include/asm/cacheflush.h @@ -133,8 +133,8 @@ static __always_inline void __flush_icac int set_memory_valid(unsigned long addr, int numpages, int enable); -int set_direct_map_invalid_noflush(struct page *page); -int set_direct_map_default_noflush(struct page *page); +int set_direct_map_invalid_noflush(struct page *page, int numpages); +int set_direct_map_default_noflush(struct page *page, int numpages); bool kernel_page_present(struct page *page); #include <asm-generic/cacheflush.h> --- a/arch/arm64/mm/pageattr.c~set_memory-allow-set_direct_map__noflush-for-multiple-pages +++ a/arch/arm64/mm/pageattr.c @@ -148,34 +148,36 @@ int set_memory_valid(unsigned long addr, __pgprot(PTE_VALID)); } -int set_direct_map_invalid_noflush(struct page *page) +int set_direct_map_invalid_noflush(struct page *page, int numpages) { struct page_change_data data = { .set_mask = __pgprot(0), .clear_mask = __pgprot(PTE_VALID), }; + unsigned long size = PAGE_SIZE * numpages; if (!debug_pagealloc_enabled() && !rodata_full) return 0; return apply_to_page_range(&init_mm, (unsigned long)page_address(page), - PAGE_SIZE, change_page_range, &data); + size, change_page_range, &data); } -int set_direct_map_default_noflush(struct page *page) +int set_direct_map_default_noflush(struct page *page, int numpages) { struct page_change_data data = { .set_mask = __pgprot(PTE_VALID | PTE_WRITE), .clear_mask = __pgprot(PTE_RDONLY), }; + unsigned long size = PAGE_SIZE * numpages; if (!debug_pagealloc_enabled() && !rodata_full) return 0; return apply_to_page_range(&init_mm, (unsigned long)page_address(page), - PAGE_SIZE, change_page_range, &data); + size, change_page_range, &data); } #ifdef CONFIG_DEBUG_PAGEALLOC --- a/arch/riscv/include/asm/set_memory.h~set_memory-allow-set_direct_map__noflush-for-multiple-pages +++ a/arch/riscv/include/asm/set_memory.h @@ -26,8 +26,8 @@ static inline void protect_kernel_text_d static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; } #endif -int set_direct_map_invalid_noflush(struct page *page); -int set_direct_map_default_noflush(struct page *page); +int set_direct_map_invalid_noflush(struct page *page, int numpages); +int set_direct_map_default_noflush(struct page *page, int numpages); bool kernel_page_present(struct page *page); #endif /* __ASSEMBLY__ */ --- 

 arch/arm64/include/asm/cacheflush.h |    4 ++--
 arch/arm64/mm/pageattr.c            |   10 ++++++----
 arch/riscv/include/asm/set_memory.h |    4 ++--
 arch/riscv/mm/pageattr.c            |    8 ++++----
 arch/x86/include/asm/set_memory.h   |    4 ++--
 arch/x86/mm/pat/set_memory.c        |    8 ++++----
 include/linux/set_memory.h          |    4 ++--
 kernel/power/snapshot.c             |    4 ++--
 mm/vmalloc.c                        |    5 +++--
 9 files changed, 27 insertions(+), 24 deletions(-)

--- a/arch/arm64/include/asm/cacheflush.h~set_memory-allow-set_direct_map__noflush-for-multiple-pages
+++ a/arch/arm64/include/asm/cacheflush.h
@@ -133,8 +133,8 @@ static __always_inline void __flush_icac
 
 int set_memory_valid(unsigned long addr, int numpages, int enable);
 
-int set_direct_map_invalid_noflush(struct page *page);
-int set_direct_map_default_noflush(struct page *page);
+int set_direct_map_invalid_noflush(struct page *page, int numpages);
+int set_direct_map_default_noflush(struct page *page, int numpages);
 bool kernel_page_present(struct page *page);
 
 #include <asm-generic/cacheflush.h>
--- a/arch/arm64/mm/pageattr.c~set_memory-allow-set_direct_map__noflush-for-multiple-pages
+++ a/arch/arm64/mm/pageattr.c
@@ -148,34 +148,36 @@ int set_memory_valid(unsigned long addr,
 					__pgprot(PTE_VALID));
 }
 
-int set_direct_map_invalid_noflush(struct page *page)
+int set_direct_map_invalid_noflush(struct page *page, int numpages)
 {
 	struct page_change_data data = {
 		.set_mask = __pgprot(0),
 		.clear_mask = __pgprot(PTE_VALID),
 	};
+	unsigned long size = PAGE_SIZE * numpages;
 
 	if (!debug_pagealloc_enabled() && !rodata_full)
 		return 0;
 
 	return apply_to_page_range(&init_mm,
 				   (unsigned long)page_address(page),
-				   PAGE_SIZE, change_page_range, &data);
+				   size, change_page_range, &data);
 }
 
-int set_direct_map_default_noflush(struct page *page)
+int set_direct_map_default_noflush(struct page *page, int numpages)
 {
 	struct page_change_data data = {
 		.set_mask = __pgprot(PTE_VALID | PTE_WRITE),
 		.clear_mask = __pgprot(PTE_RDONLY),
 	};
+	unsigned long size = PAGE_SIZE * numpages;
 
 	if (!debug_pagealloc_enabled() && !rodata_full)
 		return 0;
 
 	return apply_to_page_range(&init_mm,
 				   (unsigned long)page_address(page),
-				   PAGE_SIZE, change_page_range, &data);
+				   size, change_page_range, &data);
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
--- a/arch/riscv/include/asm/set_memory.h~set_memory-allow-set_direct_map__noflush-for-multiple-pages
+++ a/arch/riscv/include/asm/set_memory.h
@@ -26,8 +26,8 @@ static inline void protect_kernel_text_d
 static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; }
 #endif
 
-int set_direct_map_invalid_noflush(struct page *page);
-int set_direct_map_default_noflush(struct page *page);
+int set_direct_map_invalid_noflush(struct page *page, int numpages);
+int set_direct_map_default_noflush(struct page *page, int numpages);
 bool kernel_page_present(struct page *page);
 
 #endif /* __ASSEMBLY__ */
--- a/arch/riscv/mm/pageattr.c~set_memory-allow-set_direct_map__noflush-for-multiple-pages
+++ a/arch/riscv/mm/pageattr.c
@@ -156,11 +156,11 @@ int set_memory_nx(unsigned long addr, in
 	return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
 }
 
-int set_direct_map_invalid_noflush(struct page *page)
+int set_direct_map_invalid_noflush(struct page *page, int numpages)
 {
 	int ret;
 	unsigned long start = (unsigned long)page_address(page);
-	unsigned long end = start + PAGE_SIZE;
+	unsigned long end = start + PAGE_SIZE * numpages;
 	struct pageattr_masks masks = {
 		.set_mask = __pgprot(0),
 		.clear_mask = __pgprot(_PAGE_PRESENT)
@@ -173,11 +173,11 @@ int set_direct_map_invalid_noflush(struc
 	return ret;
 }
 
-int set_direct_map_default_noflush(struct page *page)
+int set_direct_map_default_noflush(struct page *page, int numpages)
 {
 	int ret;
 	unsigned long start = (unsigned long)page_address(page);
-	unsigned long end = start + PAGE_SIZE;
+	unsigned long end = start + PAGE_SIZE * numpages;
 	struct pageattr_masks masks = {
 		.set_mask = PAGE_KERNEL,
 		.clear_mask = __pgprot(0)
--- a/arch/x86/include/asm/set_memory.h~set_memory-allow-set_direct_map__noflush-for-multiple-pages
+++ a/arch/x86/include/asm/set_memory.h
@@ -80,8 +80,8 @@ int set_pages_wb(struct page *page, int 
 int set_pages_ro(struct page *page, int numpages);
 int set_pages_rw(struct page *page, int numpages);
 
-int set_direct_map_invalid_noflush(struct page *page);
-int set_direct_map_default_noflush(struct page *page);
+int set_direct_map_invalid_noflush(struct page *page, int numpages);
+int set_direct_map_default_noflush(struct page *page, int numpages);
 bool kernel_page_present(struct page *page);
 
 extern int kernel_set_to_readonly;
--- a/arch/x86/mm/pat/set_memory.c~set_memory-allow-set_direct_map__noflush-for-multiple-pages
+++ a/arch/x86/mm/pat/set_memory.c
@@ -2192,14 +2192,14 @@ static int __set_pages_np(struct page *p
 	return __change_page_attr_set_clr(&cpa, 0);
 }
 
-int set_direct_map_invalid_noflush(struct page *page)
+int set_direct_map_invalid_noflush(struct page *page, int numpages)
 {
-	return __set_pages_np(page, 1);
+	return __set_pages_np(page, numpages);
 }
 
-int set_direct_map_default_noflush(struct page *page)
+int set_direct_map_default_noflush(struct page *page, int numpages)
 {
-	return __set_pages_p(page, 1);
+	return __set_pages_p(page, numpages);
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
--- a/include/linux/set_memory.h~set_memory-allow-set_direct_map__noflush-for-multiple-pages
+++ a/include/linux/set_memory.h
@@ -15,11 +15,11 @@ static inline int set_memory_nx(unsigned
 #endif
 
 #ifndef CONFIG_ARCH_HAS_SET_DIRECT_MAP
-static inline int set_direct_map_invalid_noflush(struct page *page)
+static inline int set_direct_map_invalid_noflush(struct page *page, int numpages)
 {
 	return 0;
 }
-static inline int set_direct_map_default_noflush(struct page *page)
+static inline int set_direct_map_default_noflush(struct page *page, int numpages)
 {
 	return 0;
 }
--- a/kernel/power/snapshot.c~set_memory-allow-set_direct_map__noflush-for-multiple-pages
+++ a/kernel/power/snapshot.c
@@ -86,7 +86,7 @@ static inline void hibernate_restore_unp
 static inline void hibernate_map_page(struct page *page)
 {
 	if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
-		int ret = set_direct_map_default_noflush(page);
+		int ret = set_direct_map_default_noflush(page, 1);
 
 		if (ret)
 			pr_warn_once("Failed to remap page\n");
@@ -99,7 +99,7 @@ static inline void hibernate_unmap_page(
 {
 	if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
 		unsigned long addr = (unsigned long)page_address(page);
-		int ret = set_direct_map_invalid_noflush(page);
+		int ret = set_direct_map_invalid_noflush(page, 1);
 
 		if (ret)
 			pr_warn_once("Failed to remap page\n");
--- a/mm/vmalloc.c~set_memory-allow-set_direct_map__noflush-for-multiple-pages
+++ a/mm/vmalloc.c
@@ -2195,13 +2195,14 @@ struct vm_struct *remove_vm_area(const v
 }
 
 static inline void set_area_direct_map(const struct vm_struct *area,
-				       int (*set_direct_map)(struct page *page))
+				       int (*set_direct_map)(struct page *page,
+							     int numpages))
 {
 	int i;
 
 	for (i = 0; i < area->nr_pages; i++)
 		if (page_address(area->pages[i]))
-			set_direct_map(area->pages[i]);
+			set_direct_map(area->pages[i], 1);
 }
 
 /* Handle removing and resetting vm mappings related to the vm_struct. */
_

Patches currently in -mm which might be from rppt@xxxxxxxxxxxxx are

mm-add-definition-of-pmd_page_order.patch
mmap-make-mlock_future_check-global.patch
riscv-kconfig-make-direct-map-manipulation-options-depend-on-mmu.patch
set_memory-allow-set_direct_map__noflush-for-multiple-pages.patch
set_memory-allow-querying-whether-set_direct_map_-is-actually-enabled.patch
mm-introduce-memfd_secret-system-call-to-create-secret-memory-areas.patch
pm-hibernate-disable-when-there-are-active-secretmem-users.patch
arch-mm-wire-up-memfd_secret-system-call-where-relevant.patch
secretmem-test-add-basic-selftest-for-memfd_secret2.patch