When optimally demoting huge pages with vmemmap optimizations, the routines which destroy and prep hugetlb pages need to be modified. Currently, these routines expect all vmemmap pages to be present as they will write to all page structs for tail pages. To optimally handle demotion of huge pages, not all vmemmap pages will be present. Only those pages required for the demoted pages will be present. Therefore, the destroy and prep routines must only write to struct pages for which vmemmap pages are present. Modify destroy_compound_gigantic_page_for_demote and prep_compound_gigantic_page_for_demote to take vmemmap optimized pages into account. Use the hugetlb specific flag HPageVmemmapOptimized to determine if this special processing is needed. These modifications will be used in subsequent patches where vmemmap optimizations for demote are fully enabled. Also modify the routine free_huge_page_vmemmap to immediately return if the passed page is already optimized. With demotion, prep_new_huge_page can be called for vmemmap optimized pages. 
Signed-off-by: Mike Kravetz <mike.kravetz@xxxxxxxxxx> --- mm/hugetlb.c | 17 +++++++++++++++-- mm/hugetlb_vmemmap.c | 12 ++---------- mm/hugetlb_vmemmap.h | 10 ++++++++++ 3 files changed, 27 insertions(+), 12 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 9bbfeba3e1ae..4040dd3c6fe3 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1255,12 +1255,18 @@ static void __destroy_compound_gigantic_page(struct page *page, unsigned int order, bool demote) { int i; - int nr_pages = 1 << order; + int nr_pages; struct page *p = page + 1; atomic_set(compound_mapcount_ptr(page), 0); atomic_set(compound_pincount_ptr(page), 0); + if (demote && HPageVmemmapOptimized(page)) { + clear_compound_head(page); + nr_pages = RESERVE_VMEMMAP_SIZE / sizeof(struct page); + } else + nr_pages = 1 << order; + for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { p->mapping = NULL; clear_compound_head(p); @@ -1517,6 +1523,7 @@ static void __update_and_free_page(struct hstate *h, struct page *page) return; } #endif + prep_compound_page(page, huge_page_order(h)); __free_pages(page, huge_page_order(h)); } } @@ -1705,9 +1712,14 @@ static bool __prep_compound_gigantic_page(struct page *page, unsigned int order, bool demote) { int i, j; - int nr_pages = 1 << order; + int nr_pages; struct page *p = page + 1; + if (demote && HPageVmemmapOptimized(page)) + nr_pages = RESERVE_VMEMMAP_SIZE / sizeof(struct page); + else + nr_pages = 1 << order; + /* we rely on prep_new_huge_page to set the destructor */ set_compound_order(page, order); __ClearPageReserved(page); @@ -1749,6 +1761,7 @@ static bool __prep_compound_gigantic_page(struct page *page, unsigned int order, } else { VM_BUG_ON_PAGE(page_count(p), p); } + p->mapping = TAIL_MAPPING; set_compound_head(p, page); } atomic_set(compound_mapcount_ptr(page), -1); diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c index c82d60398c16..01c2cc959824 100644 --- a/mm/hugetlb_vmemmap.c +++ b/mm/hugetlb_vmemmap.c @@ -172,16 +172,6 @@ #include 
"hugetlb_vmemmap.h" -/* - * There are a lot of struct page structures associated with each HugeTLB page. - * For tail pages, the value of compound_head is the same. So we can reuse first - * page of tail page structures. We map the virtual addresses of the remaining - * pages of tail page structures to the first tail page struct, and then free - * these page frames. Therefore, we need to reserve two pages as vmemmap areas. - */ -#define RESERVE_VMEMMAP_NR 2U -#define RESERVE_VMEMMAP_SIZE (RESERVE_VMEMMAP_NR << PAGE_SHIFT) - bool hugetlb_free_vmemmap_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON); static int __init early_hugetlb_free_vmemmap_param(char *buf) @@ -250,6 +240,8 @@ void free_huge_page_vmemmap(struct hstate *h, struct page *head) if (!free_vmemmap_pages_per_hpage(h)) return; + if (HPageVmemmapOptimized(head)) /* possible for demote */ + return; vmemmap_addr += RESERVE_VMEMMAP_SIZE; vmemmap_end = vmemmap_addr + free_vmemmap_pages_size_per_hpage(h); diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h index 44382504efc3..36274bf0256c 100644 --- a/mm/hugetlb_vmemmap.h +++ b/mm/hugetlb_vmemmap.h @@ -10,6 +10,16 @@ #define _LINUX_HUGETLB_VMEMMAP_H #include <linux/hugetlb.h> +/* + * There are a lot of struct page structures associated with each HugeTLB page. + * For tail pages, the value of compound_head is the same. So we can reuse first + * page of tail page structures. We map the virtual addresses of the remaining + * pages of tail page structures to the first tail page struct, and then free + * these page frames. Therefore, we need to reserve two pages as vmemmap areas. + */ +#define RESERVE_VMEMMAP_NR 2U +#define RESERVE_VMEMMAP_SIZE (RESERVE_VMEMMAP_NR << PAGE_SHIFT) + #ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP int alloc_huge_page_vmemmap(struct hstate *h, struct page *head); void free_huge_page_vmemmap(struct hstate *h, struct page *head); -- 2.31.1