On Fri, Sep 24, 2021 at 6:24 AM Barry Song <21cnbao@xxxxxxxxx> wrote: > > On Wed, Sep 22, 2021 at 10:27 PM Muchun Song <songmuchun@xxxxxxxxxxxxx> wrote: > > > > The page_head_if_fake() is used throughout memory management and the > > conditional check requires checking a global variable, although the > > overhead of this check may be small, it increases when the memory > > cache comes under pressure. Also, the global variable will not be > > modified after system boot, so it is very appropriate to use static > > key mechanism. > > > > Signed-off-by: Muchun Song <songmuchun@xxxxxxxxxxxxx> > > --- > > include/linux/hugetlb.h | 10 ++++++++-- > > include/linux/page-flags.h | 6 ++++-- > > mm/hugetlb_vmemmap.c | 12 ++++++------ > > mm/memory_hotplug.c | 2 +- > > 4 files changed, 19 insertions(+), 11 deletions(-) > > > > diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h > > index 1faebe1cd0ed..4cc647a5dbf8 100644 > > --- a/include/linux/hugetlb.h > > +++ b/include/linux/hugetlb.h > > @@ -1066,9 +1066,15 @@ static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr > > #endif /* CONFIG_HUGETLB_PAGE */ > > > > #ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP > > -extern bool hugetlb_free_vmemmap_enabled; > > +static inline bool hugetlb_free_vmemmap_enabled(void) > > +{ > > + return static_key_enabled(&hugetlb_free_vmemmap_enabled_key); > > could it be > if (static_branch_maybe(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON, > &hugetlb_free_vmemmap_enabled_key)) > > then we are able to remove the duplication in page_fixed_fake_head()? Definitely. I'll update in the next version. Thanks. 
> > > +} > > #else > > -#define hugetlb_free_vmemmap_enabled false > > +static inline bool hugetlb_free_vmemmap_enabled(void) > > +{ > > + return false; > > +} > > #endif > > > > static inline spinlock_t *huge_pte_lock(struct hstate *h, > > diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h > > index b47a7f51d2c3..54e119e44496 100644 > > --- a/include/linux/page-flags.h > > +++ b/include/linux/page-flags.h > > @@ -185,7 +185,8 @@ enum pageflags { > > #ifndef __GENERATING_BOUNDS_H > > > > #ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP > > -extern bool hugetlb_free_vmemmap_enabled; > > +DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON, > > + hugetlb_free_vmemmap_enabled_key); > > > > /* > > * If the feature of freeing some vmemmap pages associated with each HugeTLB > > @@ -205,7 +206,8 @@ extern bool hugetlb_free_vmemmap_enabled; > > */ > > static __always_inline const struct page *page_fixed_fake_head(const struct page *page) > > { > > - if (!hugetlb_free_vmemmap_enabled) > > + if (!static_branch_maybe(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON, > > + &hugetlb_free_vmemmap_enabled_key)) > > return page; > > /* > > * Only addresses aligned with PAGE_SIZE of struct page may be fake head > > diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c > > index 527bcaa44a48..47517e878ed5 100644 > > --- a/mm/hugetlb_vmemmap.c > > +++ b/mm/hugetlb_vmemmap.c > > @@ -188,9 +188,9 @@ > > #define RESERVE_VMEMMAP_NR 1U > > #define RESERVE_VMEMMAP_SIZE (RESERVE_VMEMMAP_NR << PAGE_SHIFT) > > > > -bool hugetlb_free_vmemmap_enabled __read_mostly = > > - IS_ENABLED(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON); > > -EXPORT_SYMBOL(hugetlb_free_vmemmap_enabled); > > +DEFINE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON, > > + hugetlb_free_vmemmap_enabled_key); > > +EXPORT_SYMBOL(hugetlb_free_vmemmap_enabled_key); > > > > static int __init early_hugetlb_free_vmemmap_param(char *buf) > > { > > @@ -204,9 +204,9 @@ static int __init 
early_hugetlb_free_vmemmap_param(char *buf) > > return -EINVAL; > > > > if (!strcmp(buf, "on")) > > - hugetlb_free_vmemmap_enabled = true; > > + static_branch_enable(&hugetlb_free_vmemmap_enabled_key); > > else if (!strcmp(buf, "off")) > > - hugetlb_free_vmemmap_enabled = false; > > + static_branch_disable(&hugetlb_free_vmemmap_enabled_key); > > else > > return -EINVAL; > > > > @@ -284,7 +284,7 @@ void __init hugetlb_vmemmap_init(struct hstate *h) > > BUILD_BUG_ON(__NR_USED_SUBPAGE >= > > RESERVE_VMEMMAP_SIZE / sizeof(struct page)); > > > > - if (!hugetlb_free_vmemmap_enabled) > > + if (!hugetlb_free_vmemmap_enabled()) > > return; > > > > vmemmap_pages = (nr_pages * sizeof(struct page)) >> PAGE_SHIFT; > > diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c > > index 0488eed3327c..89c1fde02162 100644 > > --- a/mm/memory_hotplug.c > > +++ b/mm/memory_hotplug.c > > @@ -1341,7 +1341,7 @@ bool mhp_supports_memmap_on_memory(unsigned long size) > > * populate a single PMD. > > */ > > return memmap_on_memory && > > - !hugetlb_free_vmemmap_enabled && > > + !hugetlb_free_vmemmap_enabled() && > > IS_ENABLED(CONFIG_MHP_MEMMAP_ON_MEMORY) && > > size == memory_block_size_bytes() && > > IS_ALIGNED(vmemmap_size, PMD_SIZE) && > > -- > > 2.11.0 > > > > Thanks > barry