On Mon, May 08, 2023 at 03:11:50PM +0800, Kefeng Wang wrote:
> Since commit f2fc4b44ec2b ("mm: move init_mem_debugging_and_hardening()
> to mm/mm_init.c"), the init_on_alloc and init_on_free defines are
> better moved there too.
> 
> Signed-off-by: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>

Reviewed-by: Mike Rapoport (IBM) <rppt@xxxxxxxxxx>

> ---
>  mm/mm_init.c    | 6 ++++++
>  mm/page_alloc.c | 5 -----
>  2 files changed, 6 insertions(+), 5 deletions(-)
> 
> diff --git a/mm/mm_init.c b/mm/mm_init.c
> index da162b7a044c..15201887f8e0 100644
> --- a/mm/mm_init.c
> +++ b/mm/mm_init.c
> @@ -2543,6 +2543,12 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn,
>  		__free_pages_core(page, order);
>  }
>  
> +DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
> +EXPORT_SYMBOL(init_on_alloc);
> +
> +DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
> +EXPORT_SYMBOL(init_on_free);
> +
>  static bool _init_on_alloc_enabled_early __read_mostly
>  				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
>  static int __init early_init_on_alloc(char *buf)
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index d1086aeca8f2..4f094ba7c8fb 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -233,11 +233,6 @@ unsigned long totalcma_pages __read_mostly;
>  
>  int percpu_pagelist_high_fraction;
>  gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
> -DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
> -EXPORT_SYMBOL(init_on_alloc);
> -
> -DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
> -EXPORT_SYMBOL(init_on_free);
> 
>  /*
>   * A cached value of the page's pageblock's migratetype, used when the page is
> -- 
> 2.35.3
> 
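
FWIW, keeping the EXPORT_SYMBOL()s next to the moved definitions makes sense:
the two static keys are not consumed in page_alloc.c directly but through the
want_init_on_alloc()/want_init_on_free() inline helpers, which modular slab
users end up inlining. Roughly (a sketch from memory of the helpers in
include/linux/mm.h, exact code may differ between kernel versions):

	/* consumers of the keys; inlined into modules, hence the exports */
	static inline bool want_init_on_alloc(gfp_t flags)
	{
		if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
					&init_on_alloc))
			return true;
		/* callers asking for zeroed memory get it regardless */
		return flags & __GFP_ZERO;
	}

	static inline bool want_init_on_free(void)
	{
		return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
					   &init_on_free);
	}

So the move only relocates the definitions and the early_param() plumbing;
the fast-path checks above are unaffected.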