The patch titled
     Subject: mm, page_poison: use static key more efficiently
has been removed from the -mm tree.  Its filename was
     mm-page_poison-use-static-key-more-efficiently.patch

This patch was dropped because an updated version will be merged

------------------------------------------------------
From: Vlastimil Babka <vbabka@xxxxxxx>
Subject: mm, page_poison: use static key more efficiently

Commit 11c9c7edae06 ("mm/page_poison.c: replace bool variable with static
key") changed page_poisoning_enabled() to a static key check.  However, the
function is not inlined, so each check still involves a function call whose
overhead is not eliminated when page poisoning is disabled.

Analogously to how debug_pagealloc is handled, this patch converts
page_poisoning_enabled() back to a boolean check and introduces
page_poisoning_enabled_static() for fast paths.  Both functions are inlined.

It also optimizes the check that enables page poisoning instead of
debug_pagealloc on architectures without proper debug_pagealloc support.
Moving that check to init_mem_debugging() means a single static key is
enabled, instead of having two static branches in
page_poisoning_enabled_static().
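As a minimal, illustrative sketch of the pattern described above (the
my_feature_* names are hypothetical; only the diff below reflects the actual
kernel symbols), the early boolean, the one-time static key enable, and the
inlined fast-path check fit together like this:

/*
 * Illustrative sketch only, not part of the patch.  The boot parameter is
 * recorded in a plain bool, init code flips the static key once, and fast
 * paths use the inlined static-branch check, so the disabled case reduces
 * to a patched-out jump rather than a function call.
 */
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>

static bool _my_feature_enabled_early;
DEFINE_STATIC_KEY_FALSE(_my_feature_enabled);

static int __init early_my_feature_param(char *buf)
{
	/* "my_feature=on/off" only sets the early bool; no key patching yet. */
	return kstrtobool(buf, &_my_feature_enabled_early);
}
early_param("my_feature", early_my_feature_param);

/* Init-time / slow-path check: always accurate, reads the plain bool. */
static inline bool my_feature_enabled(void)
{
	return _my_feature_enabled_early;
}

/* Fast-path check: valid once my_feature_init() below has run. */
static inline bool my_feature_enabled_static(void)
{
	return static_branch_unlikely(&_my_feature_enabled);
}

/* Called once during early init, analogous to init_mem_debugging(). */
void __init my_feature_init(void)
{
	if (my_feature_enabled())
		static_branch_enable(&_my_feature_enabled);
}

With this split, my_feature_enabled() is always accurate but reads memory,
while my_feature_enabled_static() costs only a patched jump in hot paths and
may return a false negative until my_feature_init() has run, which is the
same trade-off documented in the comment added to
page_poisoning_enabled_static().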
Link: https://lkml.kernel.org/r/20201026173358.14704-3-vbabka@xxxxxxx
Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Alexander Potapenko <glider@xxxxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: Kees Cook <keescook@xxxxxxxxxxxx>
Cc: Mateusz Nosek <mateusznosek0@xxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxxxx>
Cc: Mike Rapoport <rppt@xxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 drivers/virtio/virtio_balloon.c |    2 -
 include/linux/mm.h              |   16 +++++++++++-
 mm/page_alloc.c                 |   13 +++++++++
 mm/page_poison.c                |   40 ++++--------------------
 4 files changed, 34 insertions(+), 37 deletions(-)

--- a/drivers/virtio/virtio_balloon.c~mm-page_poison-use-static-key-more-efficiently
+++ a/drivers/virtio/virtio_balloon.c
@@ -1116,7 +1116,7 @@ static int virtballoon_validate(struct v
	 */
	if (!want_init_on_free() &&
	    (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY) ||
-	     !page_poisoning_enabled()))
+	     !page_poisoning_enabled_static()))
		__virtio_clear_bit(vdev, VIRTIO_BALLOON_F_PAGE_POISON);
	else if (!virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON))
		__virtio_clear_bit(vdev, VIRTIO_BALLOON_F_REPORTING);
--- a/include/linux/mm.h~mm-page_poison-use-static-key-more-efficiently
+++ a/include/linux/mm.h
@@ -2855,10 +2855,24 @@ extern int apply_to_existing_page_range(
 extern void init_mem_debugging(void);

 #ifdef CONFIG_PAGE_POISONING
-extern bool page_poisoning_enabled(void);
 extern void kernel_poison_pages(struct page *page, int numpages, int enable);
+extern bool _page_poisoning_enabled_early;
+DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled);
+static inline bool page_poisoning_enabled(void)
+{
+	return _page_poisoning_enabled_early;
+}
+/*
+ * For use in fast paths after init_mem_debugging() has run, or when a
+ * false negative result is not harmful when called too early.
+ */
+static inline bool page_poisoning_enabled_static(void)
+{
+	return (static_branch_unlikely(&_page_poisoning_enabled));
+}
 #else
 static inline bool page_poisoning_enabled(void) { return false; }
+static inline bool page_poisoning_enabled_static(void) { return false; }
 static inline void kernel_poison_pages(struct page *page, int numpages,
					int enable) { }
 #endif
--- a/mm/page_alloc.c~mm-page_poison-use-static-key-more-efficiently
+++ a/mm/page_alloc.c
@@ -777,6 +777,17 @@ void init_mem_debugging(void)
		}
	}

+#ifdef CONFIG_PAGE_POISONING
+	/*
+	 * Page poisoning is debug page alloc for some arches. If
+	 * either of those options are enabled, enable poisoning.
+	 */
+	if (page_poisoning_enabled() ||
+	    (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
+	     debug_pagealloc_enabled()))
+		static_branch_enable(&_page_poisoning_enabled);
+#endif
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
	if (!debug_pagealloc_enabled())
		return;
@@ -2208,7 +2219,7 @@ static inline int check_new_page(struct
 static inline bool free_pages_prezeroed(void)
 {
	return (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
-		page_poisoning_enabled()) || want_init_on_free();
+		page_poisoning_enabled_static()) || want_init_on_free();
 }

 #ifdef CONFIG_DEBUG_VM
--- a/mm/page_poison.c~mm-page_poison-use-static-key-more-efficiently
+++ a/mm/page_poison.c
@@ -8,45 +8,17 @@
 #include <linux/ratelimit.h>
 #include <linux/kasan.h>

-static DEFINE_STATIC_KEY_FALSE_RO(want_page_poisoning);
+bool _page_poisoning_enabled_early;
+EXPORT_SYMBOL(_page_poisoning_enabled_early);
+DEFINE_STATIC_KEY_FALSE_RO(_page_poisoning_enabled);
+EXPORT_SYMBOL(_page_poisoning_enabled);

 static int __init early_page_poison_param(char *buf)
 {
-	int ret;
-	bool tmp;
-
-	ret = strtobool(buf, &tmp);
-	if (ret)
-		return ret;
-
-	if (tmp)
-		static_branch_enable(&want_page_poisoning);
-	else
-		static_branch_disable(&want_page_poisoning);
-
-	return 0;
+	return kstrtobool(buf, &_page_poisoning_enabled_early);
 }
 early_param("page_poison", early_page_poison_param);

-/**
- * page_poisoning_enabled - check if page poisoning is enabled
- *
- * Return true if page poisoning is enabled, or false if not.
- */
-bool page_poisoning_enabled(void)
-{
-	/*
-	 * Assumes that debug_pagealloc_enabled is set before
-	 * memblock_free_all.
-	 * Page poisoning is debug page alloc for some arches. If
-	 * either of those options are enabled, enable poisoning.
-	 */
-	return (static_branch_unlikely(&want_page_poisoning) ||
-		(!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
-		 debug_pagealloc_enabled()));
-}
-EXPORT_SYMBOL_GPL(page_poisoning_enabled);
-
 static void poison_page(struct page *page)
 {
	void *addr = kmap_atomic(page);
@@ -127,7 +99,7 @@ static void unpoison_pages(struct page *

 void kernel_poison_pages(struct page *page, int numpages, int enable)
 {
-	if (!page_poisoning_enabled())
+	if (!page_poisoning_enabled_static())
		return;

	if (enable)
_

Patches currently in -mm which might be from vbabka@xxxxxxx are

mm-slub-use-kmem_cache_debug_flags-in-deactivate_slab.patch
mm-page_alloc-reduce-static-keys-in-prep_new_page.patch