When adding hugetlb pages to the pool, we first create a list of the
allocated pages before adding them to the pool.  Pass this list of pages
to a new routine hugetlb_vmemmap_optimize_folios() for vmemmap
optimization.

We also modify the routine vmemmap_should_optimize() to check for pages
that are already optimized.  There are code paths that might request
vmemmap optimization twice and we want to make sure this is not
attempted.

Signed-off-by: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
Reviewed-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
---
 mm/hugetlb.c         |  5 +++++
 mm/hugetlb_vmemmap.c | 11 +++++++++++
 mm/hugetlb_vmemmap.h |  5 +++++
 3 files changed, 21 insertions(+)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 77313c9e0fa8..214603898ad0 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2248,6 +2248,11 @@ static void prep_and_add_allocated_folios(struct hstate *h,
 {
 	struct folio *folio, *tmp_f;
 
+	/*
+	 * Send list for bulk vmemmap optimization processing
+	 */
+	hugetlb_vmemmap_optimize_folios(h, folio_list);
+
 	/*
 	 * Add all new pool pages to free lists in one lock cycle
 	 */
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 0bde38626d25..c17784f36dc3 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -483,6 +483,9 @@ int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
 /* Return true iff a HugeTLB whose vmemmap should and can be optimized. */
 static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
 {
+	if (HPageVmemmapOptimized((struct page *)head))
+		return false;
+
 	if (!READ_ONCE(vmemmap_optimize_enabled))
 		return false;
 
@@ -572,6 +575,14 @@ void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
 	SetHPageVmemmapOptimized(head);
 }
 
+void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
+{
+	struct folio *folio;
+
+	list_for_each_entry(folio, folio_list, lru)
+		hugetlb_vmemmap_optimize(h, &folio->page);
+}
+
 static struct ctl_table hugetlb_vmemmap_sysctls[] = {
 	{
 		.procname	= "hugetlb_optimize_vmemmap",
diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h
index 25bd0e002431..036494e040ca 100644
--- a/mm/hugetlb_vmemmap.h
+++ b/mm/hugetlb_vmemmap.h
@@ -13,6 +13,7 @@
 #ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
 int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head);
 void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head);
+void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list);
 
 /*
  * Reserve one vmemmap page, all vmemmap addresses are mapped to it. See
@@ -47,6 +48,10 @@ static inline void hugetlb_vmemmap_optimize(const struct hstate *h, struct page
 {
 }
 
+static inline void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
+{
+}
+
 static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
 {
 	return 0;
-- 
2.41.0