In a later patch, we will use free_vmemmap_page() to free unused vmemmap
pages and prepare_vmemmap_page() to initialize a page that is going to be
used as a vmemmap page.

Signed-off-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
---
 include/linux/bootmem_info.h | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h
index 4ed6dee1adc9..ce9d8c97369d 100644
--- a/include/linux/bootmem_info.h
+++ b/include/linux/bootmem_info.h
@@ -3,6 +3,7 @@
 #define __LINUX_BOOTMEM_INFO_H
 
 #include <linux/mmzone.h>
+#include <linux/mm.h>
 
 /*
  * Types for free bootmem stored in page->lru.next. These have to be in
@@ -22,6 +23,30 @@ void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
 void get_page_bootmem(unsigned long info, struct page *page,
 		      unsigned long type);
 void put_page_bootmem(struct page *page);
+
+static inline void free_vmemmap_page(struct page *page)
+{
+	VM_WARN_ON(!PageReserved(page) || page_ref_count(page) != 2);
+
+	/* A bootmem page has PG_reserved set by reserve_bootmem_region() */
+	if (PageReserved(page)) {
+		unsigned long magic = (unsigned long)page->freelist;
+
+		if (magic == SECTION_INFO || magic == MIX_SECTION_INFO)
+			put_page_bootmem(page);
+		else
+			WARN_ON(1);
+	}
+}
+
+static inline void prepare_vmemmap_page(struct page *page)
+{
+	unsigned long section_nr = pfn_to_section_nr(page_to_pfn(page));
+
+	get_page_bootmem(section_nr, page, SECTION_INFO);
+	__SetPageReserved(page);
+	adjust_managed_page_count(page, -1);
+}
 #else
 static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
 {
--
2.11.0
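
For context, a minimal sketch of how a follow-up patch might consume these
helpers when releasing and later re-populating part of the vmemmap. The
range-walking wrappers free_vmemmap_page_range() and
prepare_vmemmap_page_range() below are assumptions for illustration only
and are not part of this patch; they also presume CONFIG_HAVE_BOOTMEM_INFO_NODE
and that every page in the range is in the state the helpers expect.

#include <linux/bootmem_info.h>
#include <linux/mm.h>

/*
 * Illustration only: hand each page that backs a contiguous piece of the
 * vmemmap to the helpers introduced above.
 */
static void free_vmemmap_page_range(struct page *start, struct page *end)
{
	struct page *page;

	/* Free every page backing a now-unused part of the vmemmap. */
	for (page = start; page < end; page++)
		free_vmemmap_page(page);
}

static void prepare_vmemmap_page_range(struct page *start, struct page *end)
{
	struct page *page;

	/* Set up each page so it can back the vmemmap again. */
	for (page = start; page < end; page++)
		prepare_vmemmap_page(page);
}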