The patch titled
     Subject: arch: introduce set_direct_map_valid_noflush()
has been added to the -mm mm-unstable branch.  Its filename is
     arch-introduce-set_direct_map_valid_noflush.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/arch-introduce-set_direct_map_valid_noflush.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: "Mike Rapoport (Microsoft)" <rppt@xxxxxxxxxx>
Subject: arch: introduce set_direct_map_valid_noflush()
Date: Wed, 16 Oct 2024 15:24:21 +0300

Add an API that will allow updates of the direct/linear map for a set of
physically contiguous pages.

It will be used in the following patches.

Link: https://lkml.kernel.org/r/20241016122424.1655560-6-rppt@xxxxxxxxxx
Signed-off-by: Mike Rapoport (Microsoft) <rppt@xxxxxxxxxx>
Reviewed-by: Christoph Hellwig <hch@xxxxxx>
Cc: Andreas Larsson <andreas@xxxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Ard Biesheuvel <ardb@xxxxxxxxxx>
Cc: Arnd Bergmann <arnd@xxxxxxxx>
Cc: Borislav Petkov (AMD) <bp@xxxxxxxxx>
Cc: Brian Cain <bcain@xxxxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Christophe Leroy <christophe.leroy@xxxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: Dinh Nguyen <dinguyen@xxxxxxxxxx>
Cc: Geert Uytterhoeven <geert@xxxxxxxxxxxxxx>
Cc: Guo Ren <guoren@xxxxxxxxxx>
Cc: Helge Deller <deller@xxxxxx>
Cc: Huacai Chen <chenhuacai@xxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Johannes Berg <johannes@xxxxxxxxxxxxxxxx>
Cc: John Paul Adrian Glaubitz <glaubitz@xxxxxxxxxxxxxxxxxxx>
Cc: Kent Overstreet <kent.overstreet@xxxxxxxxx>
Cc: "Liam R. Howlett" <Liam.Howlett@xxxxxxxxxx>
Howlett" <Liam.Howlett@xxxxxxxxxx> Cc: Luis Chamberlain <mcgrof@xxxxxxxxxx> Cc: Mark Rutland <mark.rutland@xxxxxxx> Cc: Masami Hiramatsu <mhiramat@xxxxxxxxxx> Cc: Matt Turner <mattst88@xxxxxxxxx> Cc: Max Filippov <jcmvbkbc@xxxxxxxxx> Cc: Michael Ellerman <mpe@xxxxxxxxxxxxxx> Cc: Michal Simek <monstr@xxxxxxxxx> Cc: Oleg Nesterov <oleg@xxxxxxxxxx> Cc: Palmer Dabbelt <palmer@xxxxxxxxxxx> Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx> Cc: Richard Weinberger <richard@xxxxxx> Cc: Russell King <linux@xxxxxxxxxxxxxxx> Cc: Song Liu <song@xxxxxxxxxx> Cc: Stafford Horne <shorne@xxxxxxxxx> Cc: Steven Rostedt (Google) <rostedt@xxxxxxxxxxx> Cc: Suren Baghdasaryan <surenb@xxxxxxxxxx> Cc: Thomas Bogendoerfer <tsbogend@xxxxxxxxxxxxxxxx> Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx> Cc: Uladzislau Rezki (Sony) <urezki@xxxxxxxxx> Cc: Vineet Gupta <vgupta@xxxxxxxxxx> Cc: Will Deacon <will@xxxxxxxxxx> Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx> --- arch/arm64/include/asm/set_memory.h | 1 + arch/arm64/mm/pageattr.c | 10 ++++++++++ arch/loongarch/include/asm/set_memory.h | 1 + arch/loongarch/mm/pageattr.c | 19 +++++++++++++++++++ arch/riscv/include/asm/set_memory.h | 1 + arch/riscv/mm/pageattr.c | 15 +++++++++++++++ arch/s390/include/asm/set_memory.h | 1 + arch/s390/mm/pageattr.c | 11 +++++++++++ arch/x86/include/asm/set_memory.h | 1 + arch/x86/mm/pat/set_memory.c | 8 ++++++++ include/linux/set_memory.h | 6 ++++++ 11 files changed, 74 insertions(+) --- a/arch/arm64/include/asm/set_memory.h~arch-introduce-set_direct_map_valid_noflush +++ a/arch/arm64/include/asm/set_memory.h @@ -13,6 +13,7 @@ int set_memory_valid(unsigned long addr, int set_direct_map_invalid_noflush(struct page *page); int set_direct_map_default_noflush(struct page *page); +int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid); bool kernel_page_present(struct page *page); #endif /* _ASM_ARM64_SET_MEMORY_H */ --- a/arch/arm64/mm/pageattr.c~arch-introduce-set_direct_map_valid_noflush +++ a/arch/arm64/mm/pageattr.c @@ -192,6 +192,16 @@ int set_direct_map_default_noflush(struc PAGE_SIZE, change_page_range, &data); } +int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid) +{ + unsigned long addr = (unsigned long)page_address(page); + + if (!can_set_direct_map()) + return 0; + + return set_memory_valid(addr, nr, valid); +} + #ifdef CONFIG_DEBUG_PAGEALLOC void __kernel_map_pages(struct page *page, int numpages, int enable) { --- a/arch/loongarch/include/asm/set_memory.h~arch-introduce-set_direct_map_valid_noflush +++ a/arch/loongarch/include/asm/set_memory.h @@ -17,5 +17,6 @@ int set_memory_rw(unsigned long addr, in bool kernel_page_present(struct page *page); int set_direct_map_default_noflush(struct page *page); int set_direct_map_invalid_noflush(struct page *page); +int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid); #endif /* _ASM_LOONGARCH_SET_MEMORY_H */ --- a/arch/loongarch/mm/pageattr.c~arch-introduce-set_direct_map_valid_noflush +++ a/arch/loongarch/mm/pageattr.c @@ -216,3 +216,22 @@ int set_direct_map_invalid_noflush(struc return __set_memory(addr, 1, __pgprot(0), __pgprot(_PAGE_PRESENT | _PAGE_VALID)); } + +int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid) +{ + unsigned long addr = (unsigned long)page_address(page); + pgprot_t set, clear; + + if (addr < vm_map_base) + return 0; + + if (valid) { + set = PAGE_KERNEL; + clear = __pgprot(0); + } else { + set = __pgprot(0); + clear = __pgprot(_PAGE_PRESENT | _PAGE_VALID); + } + + return 
+}
--- a/arch/riscv/include/asm/set_memory.h~arch-introduce-set_direct_map_valid_noflush
+++ a/arch/riscv/include/asm/set_memory.h
@@ -42,6 +42,7 @@ static inline int set_kernel_memory(char
 
 int set_direct_map_invalid_noflush(struct page *page);
 int set_direct_map_default_noflush(struct page *page);
+int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid);
 bool kernel_page_present(struct page *page);
 
 #endif /* __ASSEMBLY__ */
--- a/arch/riscv/mm/pageattr.c~arch-introduce-set_direct_map_valid_noflush
+++ a/arch/riscv/mm/pageattr.c
@@ -386,6 +386,21 @@ int set_direct_map_default_noflush(struc
 			    PAGE_KERNEL, __pgprot(_PAGE_EXEC));
 }
 
+int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
+{
+	pgprot_t set, clear;
+
+	if (valid) {
+		set = PAGE_KERNEL;
+		clear = __pgprot(_PAGE_EXEC);
+	} else {
+		set = __pgprot(0);
+		clear = __pgprot(_PAGE_PRESENT);
+	}
+
+	return __set_memory((unsigned long)page_address(page), nr, set, clear);
+}
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
 static int debug_pagealloc_set_page(pte_t *pte, unsigned long addr, void *data)
 {
--- a/arch/s390/include/asm/set_memory.h~arch-introduce-set_direct_map_valid_noflush
+++ a/arch/s390/include/asm/set_memory.h
@@ -62,5 +62,6 @@ __SET_MEMORY_FUNC(set_memory_4k, SET_MEM
 
 int set_direct_map_invalid_noflush(struct page *page);
 int set_direct_map_default_noflush(struct page *page);
+int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid);
 
 #endif
--- a/arch/s390/mm/pageattr.c~arch-introduce-set_direct_map_valid_noflush
+++ a/arch/s390/mm/pageattr.c
@@ -406,6 +406,17 @@ int set_direct_map_default_noflush(struc
 	return __set_memory((unsigned long)page_to_virt(page), 1, SET_MEMORY_DEF);
 }
 
+int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
+{
+	unsigned long flags;
+
+	if (valid)
+		flags = SET_MEMORY_DEF;
+	else
+		flags = SET_MEMORY_INV;
+
+	return __set_memory((unsigned long)page_to_virt(page), nr, flags);
+}
 #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 
 static void ipte_range(pte_t *pte, unsigned long address, int nr)
--- a/arch/x86/include/asm/set_memory.h~arch-introduce-set_direct_map_valid_noflush
+++ a/arch/x86/include/asm/set_memory.h
@@ -89,6 +89,7 @@ int set_pages_rw(struct page *page, int
 
 int set_direct_map_invalid_noflush(struct page *page);
 int set_direct_map_default_noflush(struct page *page);
+int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid);
 bool kernel_page_present(struct page *page);
 
 extern int kernel_set_to_readonly;
--- a/arch/x86/mm/pat/set_memory.c~arch-introduce-set_direct_map_valid_noflush
+++ a/arch/x86/mm/pat/set_memory.c
@@ -2444,6 +2444,14 @@ int set_direct_map_default_noflush(struc
 	return __set_pages_p(page, 1);
 }
 
+int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
+{
+	if (valid)
+		return __set_pages_p(page, nr);
+
+	return __set_pages_np(page, nr);
+}
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
--- a/include/linux/set_memory.h~arch-introduce-set_direct_map_valid_noflush
+++ a/include/linux/set_memory.h
@@ -34,6 +34,12 @@ static inline int set_direct_map_default
 	return 0;
 }
 
+static inline int set_direct_map_valid_noflush(struct page *page,
+						unsigned nr, bool valid)
+{
+	return 0;
+}
+
 static inline bool kernel_page_present(struct page *page)
 {
 	return true;
_

Patches currently in -mm which might be from rppt@xxxxxxxxxx are

mm-kmemleak-fix-typo-in-object_no_scan-comment.patch
mm-vmalloc-group-declarations-depending-on-config_mmu-together.patch
mm-vmalloc-dont-account-for-number-of-nodes-for-huge_vmap-allocations.patch
asm-generic-introduce-text-patchingh.patch
module-prepare-to-handle-rox-allocations-for-text.patch
arch-introduce-set_direct_map_valid_noflush.patch
x86-module-prepare-module-loading-for-rox-allocations-of-text.patch
execmem-add-support-for-cache-of-large-rox-pages.patch
x86-module-enable-rox-caches-for-module-text-on-64-bit.patch
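
For context, a minimal, hypothetical usage sketch (not part of this series;
example_unmap_from_direct_map() and example_restore_direct_map() are invented
names for illustration only).  It assumes a caller that owns nr physically
contiguous pages and wants them dropped from, and later restored to, the
direct/linear map; because the new primitive is *_noflush, the caller is
responsible for flushing the TLB itself:

/*
 * Hypothetical example only: these helpers are not part of the kernel,
 * they just show one plausible calling pattern for the new API.
 */
#include <linux/mm.h>
#include <linux/set_memory.h>
#include <asm/tlbflush.h>

static int example_unmap_from_direct_map(struct page *page, unsigned int nr)
{
	unsigned long addr = (unsigned long)page_address(page);
	int err;

	/* Drop the nr contiguous pages from the direct/linear map. */
	err = set_direct_map_valid_noflush(page, nr, false);
	if (err)
		return err;

	/* The API does not flush; flush the affected range ourselves. */
	flush_tlb_kernel_range(addr, addr + nr * PAGE_SIZE);
	return 0;
}

static void example_restore_direct_map(struct page *page, unsigned int nr)
{
	/* Make the pages accessible through the direct/linear map again. */
	WARN_ON_ONCE(set_direct_map_valid_noflush(page, nr, true));
}

Like set_direct_map_invalid_noflush() and set_direct_map_default_noflush(),
the new helper only rewrites the page table entries; leaving the TLB flush to
the caller lets it be batched when a large, physically contiguous range is
updated in one go.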