The patch titled
     Subject: mm, page_poison: use static key more efficiently-fix
has been added to the -mm tree.  Its filename is
     mm-page_poison-use-static-key-more-efficiently-fix.patch

This patch should soon appear at
    https://ozlabs.org/~akpm/mmots/broken-out/mm-page_poison-use-static-key-more-efficiently-fix.patch
and later at
    https://ozlabs.org/~akpm/mmotm/broken-out/mm-page_poison-use-static-key-more-efficiently-fix.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when
    testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Vlastimil Babka <vbabka@xxxxxxx>
Subject: mm, page_poison: use static key more efficiently-fix

Non-functional cleanups, per David Hildenbrand.
Link: https://lkml.kernel.org/r/c4eb5301-0435-d296-5d32-a76ac58787b2@xxxxxxx
Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/mm.h |   16 +++++++++++++---
 mm/page_alloc.c    |    7 +++----
 mm/page_poison.c   |    4 ++--
 3 files changed, 18 insertions(+), 9 deletions(-)

--- a/include/linux/mm.h~mm-page_poison-use-static-key-more-efficiently-fix
+++ a/include/linux/mm.h
@@ -2852,8 +2852,8 @@ extern int apply_to_existing_page_range(
 extern void init_mem_debugging_and_hardening(void);

 #ifdef CONFIG_PAGE_POISONING
-extern void kernel_poison_pages(struct page *page, int numpages);
-extern void kernel_unpoison_pages(struct page *page, int numpages);
+extern void __kernel_poison_pages(struct page *page, int numpages);
+extern void __kernel_unpoison_pages(struct page *page, int numpages);
 extern bool _page_poisoning_enabled_early;
 DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled);
 static inline bool page_poisoning_enabled(void)
@@ -2866,7 +2866,17 @@ static inline bool page_poisoning_enable
  */
 static inline bool page_poisoning_enabled_static(void)
 {
-	return (static_branch_unlikely(&_page_poisoning_enabled));
+	return static_branch_unlikely(&_page_poisoning_enabled);
+}
+static inline void kernel_poison_pages(struct page *page, int numpages)
+{
+	if (page_poisoning_enabled_static())
+		__kernel_poison_pages(page, numpages);
+}
+static inline void kernel_unpoison_pages(struct page *page, int numpages)
+{
+	if (page_poisoning_enabled_static())
+		__kernel_unpoison_pages(page, numpages);
 }
 #else
 static inline bool page_poisoning_enabled(void) { return false; }
--- a/mm/page_alloc.c~mm-page_poison-use-static-key-more-efficiently-fix
+++ a/mm/page_alloc.c
@@ -1271,8 +1271,8 @@ static __always_inline bool free_pages_p
 	if (want_init_on_free())
 		kernel_init_free_pages(page, 1 << order);

-	if (page_poisoning_enabled_static())
-		kernel_poison_pages(page, 1 << order);
+	kernel_poison_pages(page, 1 << order);
+
 	/*
 	 * arch_free_page() can make the page's contents inaccessible.  s390
 	 * does this.  So nothing which can access the page's contents should
@@ -2287,8 +2287,7 @@ inline void post_alloc_hook(struct page
 	arch_alloc_page(page, order);
 	debug_pagealloc_map_pages(page, 1 << order);
 	kasan_alloc_pages(page, order);
-	if (page_poisoning_enabled_static())
-		kernel_unpoison_pages(page, 1 << order);
+	kernel_unpoison_pages(page, 1 << order);

 	set_page_owner(page, order, gfp_flags);
 }
--- a/mm/page_poison.c~mm-page_poison-use-static-key-more-efficiently-fix
+++ a/mm/page_poison.c
@@ -30,7 +30,7 @@ static void poison_page(struct page *pag
 	kunmap_atomic(addr);
 }

-void kernel_poison_pages(struct page *page, int n)
+void __kernel_poison_pages(struct page *page, int n)
 {
 	int i;

@@ -89,7 +89,7 @@ static void unpoison_page(struct page *p
 	kunmap_atomic(addr);
 }

-void kernel_unpoison_pages(struct page *page, int n)
+void __kernel_unpoison_pages(struct page *page, int n)
 {
 	int i;
_

Patches currently in -mm which might be from vbabka@xxxxxxx are

mm-slub-use-kmem_cache_debug_flags-in-deactivate_slab.patch
mm-page_alloc-do-not-rely-on-the-order-of-page_poison-and-init_on_alloc-free-parameters.patch
mm-page_poison-use-static-key-more-efficiently.patch
mm-page_poison-use-static-key-more-efficiently-fix.patch
kernel-power-allow-hibernation-with-page_poison-sanity-checking.patch
kernel-power-allow-hibernation-with-page_poison-sanity-checking-fix.patch
mm-page_poison-remove-config_page_poisoning_no_sanity.patch
mm-page_poison-remove-config_page_poisoning_zero.patch