The quilt patch titled
     Subject: mm/page_alloc: add helper for checking if check_pages_enabled
has been removed from the -mm tree.  Its filename was
     mm-page_alloc-add-helper-for-checking-if-check_pages_enabled.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: "Mike Rapoport (IBM)" <rppt@xxxxxxxxxx>
Subject: mm/page_alloc: add helper for checking if check_pages_enabled
Date: Tue, 21 Mar 2023 19:05:01 +0200

Instead of duplicating the long static_branch_unlikely(&check_pages_enabled)
expression, wrap it in a helper function, is_check_pages_enabled().

Link: https://lkml.kernel.org/r/20230321170513.2401534-3-rppt@xxxxxxxxxx
Signed-off-by: Mike Rapoport (IBM) <rppt@xxxxxxxxxx>
Reviewed-by: David Hildenbrand <david@xxxxxxxxxx>
Reviewed-by: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Doug Berger <opendmb@xxxxxxxxx>
Cc: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxxxx>
Cc: Thomas Bogendoerfer <tsbogend@xxxxxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/page_alloc.c |   11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

--- a/mm/page_alloc.c~mm-page_alloc-add-helper-for-checking-if-check_pages_enabled
+++ a/mm/page_alloc.c
@@ -245,6 +245,11 @@ EXPORT_SYMBOL(init_on_free);
 /* perform sanity checks on struct pages being allocated or freed */
 static DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
 
+static inline bool is_check_pages_enabled(void)
+{
+	return static_branch_unlikely(&check_pages_enabled);
+}
+
 static bool _init_on_alloc_enabled_early __read_mostly
 				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
 static int __init early_init_on_alloc(char *buf)
@@ -1450,7 +1455,7 @@ static __always_inline bool free_pages_p
 	for (i = 1; i < (1 << order); i++) {
 		if (compound)
 			bad += free_tail_pages_check(page, page + i);
-		if (static_branch_unlikely(&check_pages_enabled)) {
+		if (is_check_pages_enabled()) {
 			if (unlikely(free_page_is_bad(page + i))) {
 				bad++;
 				continue;
@@ -1463,7 +1468,7 @@ static __always_inline bool free_pages_p
 	page->mapping = NULL;
 	if (memcg_kmem_online() && PageMemcgKmem(page))
 		__memcg_kmem_uncharge_page(page, order);
-	if (static_branch_unlikely(&check_pages_enabled)) {
+	if (is_check_pages_enabled()) {
 		if (free_page_is_bad(page))
 			bad++;
 		if (bad)
@@ -2373,7 +2378,7 @@ static int check_new_page(struct page *p
 
 static inline bool check_new_pages(struct page *page, unsigned int order)
 {
-	if (static_branch_unlikely(&check_pages_enabled)) {
+	if (is_check_pages_enabled()) {
 		for (int i = 0; i < (1 << order); i++) {
 			struct page *p = page + i;
 
_

Patches currently in -mm which might be from rppt@xxxxxxxxxx are

arm-reword-arch_force_max_order-prompt-and-help-text.patch
arm64-drop-ranges-in-definition-of-arch_force_max_order.patch
arm64-reword-arch_force_max_order-prompt-and-help-text.patch
arm64-reword-arch_force_max_order-prompt-and-help-text-v3.patch
csky-drop-arch_force_max_order.patch
ia64-dont-allow-users-to-override-arch_force_max_order.patch
m68k-reword-arch_force_max_order-prompt-and-help-text.patch
nios2-reword-arch_force_max_order-prompt-and-help-text.patch
nios2-drop-ranges-for-definition-of-arch_force_max_order.patch
powerpc-reword-arch_force_max_order-prompt-and-help-text.patch
powerpc-drop-ranges-for-definition-of-arch_force_max_order.patch
sh-reword-arch_force_max_order-prompt-and-help-text.patch
sh-reword-arch_force_max_order-prompt-and-help-text-v3.patch
sh-drop-ranges-for-definition-of-arch_force_max_order.patch
sh-drop-ranges-for-definition-of-arch_force_max_order-v3.patch
sparc-reword-arch_force_max_order-prompt-and-help-text.patch
xtensa-reword-arch_force_max_order-prompt-and-help-text.patch
mm-move-free_area_empty-to-mm-internalh.patch
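
For readers outside the kernel tree, the same wrap-the-repeated-test-in-a-helper
idiom can be sketched as a stand-alone user-space C program.  This is only an
illustrative analogue, not the patched code: a plain bool stands in for the
kernel's check_pages_enabled static key (which is a runtime-patched jump
label), and free_example_page() is a hypothetical caller invented for the
example.

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for the kernel's check_pages_enabled static key; in the
	 * kernel this is a jump label, here it is just a bool. */
	static bool check_pages_enabled;

	/* One named predicate instead of repeating the raw test at every
	 * call site, mirroring is_check_pages_enabled() in the patch. */
	static inline bool is_check_pages_enabled(void)
	{
		return check_pages_enabled;
	}

	/* Hypothetical caller showing the call-site simplification. */
	static void free_example_page(int pfn)
	{
		if (is_check_pages_enabled())
			printf("sanity-checking page %d before freeing\n", pfn);
		/* ... the actual freeing would happen here ... */
	}

	int main(void)
	{
		check_pages_enabled = true;	/* cf. CONFIG_DEBUG_VM */
		free_example_page(42);
		return 0;
	}

The payoff is the same as in the patch: call sites read as a question about
intent rather than repeating the mechanics of how the flag is tested, and a
later change to that mechanism touches only the helper.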