In the subsequent patch, we will allocate the vmemmap pages when freeing
HugeTLB pages. But update_and_free_page() can be called from a non-task
context (and with hugetlb_lock held), so defer the actual freeing to a
workqueue to avoid having to use GFP_ATOMIC to allocate the vmemmap pages.

Signed-off-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
---
 mm/hugetlb.c         | 96 ++++++++++++++++++++++++++++++++++++++++++++++------
 mm/hugetlb_vmemmap.c |  5 ---
 mm/hugetlb_vmemmap.h | 10 ++++++
 3 files changed, 95 insertions(+), 16 deletions(-)
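Note for reviewers: the deferral scheme reduces to a small, self-contained
pattern: producers push entries onto a lockless llist from any context, and a
work item drains the list in task context. The sketch below is illustrative
only and is not part of the patch; the names (struct deferred_item,
deferred_free_list, defer_free(), and the kfree() stand-in for
__free_hugepage()) are hypothetical. It shows how llist_add()'s return value
is used to schedule the work item only when the list goes from empty to
non-empty.

/*
 * Illustrative sketch only (not part of this patch): the lockless
 * defer-to-workqueue pattern used by the patch, with hypothetical names.
 * Producers may run in atomic context (e.g. with a spinlock held); the
 * expensive freeing runs later in task context.
 */
#include <linux/llist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct deferred_item {
	struct llist_node node;	/* the patch reuses page->mapping for this */
	/* payload ... */
};

static LLIST_HEAD(deferred_free_list);

static void deferred_free_workfn(struct work_struct *work)
{
	/* Atomically take ownership of everything queued so far. */
	struct llist_node *node = llist_del_all(&deferred_free_list);

	while (node) {
		struct deferred_item *item =
			llist_entry(node, struct deferred_item, node);

		node = node->next;
		kfree(item);		/* stands in for __free_hugepage() */
		cond_resched();
	}
}
static DECLARE_WORK(deferred_free_work, deferred_free_workfn);

/* May be called from non-task context or with spinlocks held. */
static void defer_free(struct deferred_item *item)
{
	/*
	 * llist_add() returns true only if the list was previously empty,
	 * so the work item is scheduled once per batch, not per entry.
	 */
	if (llist_add(&item->node, &deferred_free_list))
		schedule_work(&deferred_free_work);
}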
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 93dee37ceb6d..5131ae3d2245 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1220,7 +1220,7 @@ static void destroy_compound_gigantic_page(struct page *page,
 	__ClearPageHead(page);
 }
 
-static void free_gigantic_page(struct page *page, unsigned int order)
+static void __free_gigantic_page(struct page *page, unsigned int order)
 {
 	/*
 	 * If the page isn't allocated using the cma allocator,
@@ -1287,20 +1287,100 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 {
 	return NULL;
 }
-static inline void free_gigantic_page(struct page *page, unsigned int order) { }
+static inline void __free_gigantic_page(struct page *page,
+					unsigned int order) { }
 static inline void destroy_compound_gigantic_page(struct page *page,
 						   unsigned int order) { }
 #endif
 
-static void update_and_free_page(struct hstate *h, struct page *page)
+static void __free_hugepage(struct hstate *h, struct page *page);
+
+/*
+ * As update_and_free_page() can be called from a non-task context (and with
+ * hugetlb_lock held), defer the actual freeing to a workqueue to avoid
+ * using GFP_ATOMIC to allocate a lot of vmemmap pages.
+ *
+ * update_hpage_vmemmap_workfn() locklessly retrieves the linked list of
+ * pages to be freed and frees them one-by-one. As the page->mapping pointer
+ * is going to be cleared in update_hpage_vmemmap_workfn() anyway, it is
+ * reused as the llist_node structure of a lockless linked list of huge
+ * pages to be freed.
+ */
+static LLIST_HEAD(hpage_update_freelist);
+
+static void update_hpage_vmemmap_workfn(struct work_struct *work)
 {
-	int i;
+	struct llist_node *node;
+	struct page *page;
+
+	node = llist_del_all(&hpage_update_freelist);
+
+	while (node) {
+		page = container_of((struct address_space **)node,
+				    struct page, mapping);
+		node = node->next;
+		page->mapping = NULL;
+		__free_hugepage(page_hstate(page), page);
+		cond_resched();
+	}
+}
+static DECLARE_WORK(hpage_update_work, update_hpage_vmemmap_workfn);
+
+static inline void __update_and_free_page(struct hstate *h, struct page *page)
+{
+	/* No need to allocate vmemmap pages */
+	if (!free_vmemmap_pages_per_hpage(h)) {
+		__free_hugepage(h, page);
+		return;
+	}
+
+	/*
+	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap
+	 * pages.
+	 *
+	 * Only call schedule_work() if hpage_update_freelist was previously
+	 * empty. Otherwise, schedule_work() has already been called but the
+	 * workfn hasn't retrieved the list yet.
+	 */
+	if (llist_add((struct llist_node *)&page->mapping,
+		      &hpage_update_freelist))
+		schedule_work(&hpage_update_work);
+}
+
+#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+static inline void free_gigantic_page(struct hstate *h, struct page *page)
+{
+	__free_gigantic_page(page, huge_page_order(h));
+}
+#else
+static inline void free_gigantic_page(struct hstate *h, struct page *page)
+{
+	/*
+	 * Temporarily drop the hugetlb_lock, because
+	 * we might block in __free_gigantic_page().
+	 */
+	spin_unlock(&hugetlb_lock);
+	__free_gigantic_page(page, huge_page_order(h));
+	spin_lock(&hugetlb_lock);
+}
+#endif
+
+static void update_and_free_page(struct hstate *h, struct page *page)
+{
 
 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
 		return;
 
 	h->nr_huge_pages--;
 	h->nr_huge_pages_node[page_to_nid(page)]--;
+
+	__update_and_free_page(h, page);
+}
+
+static void __free_hugepage(struct hstate *h, struct page *page)
+{
+	int i;
+
 	for (i = 0; i < pages_per_huge_page(h); i++) {
 		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
 				1 << PG_referenced | 1 << PG_dirty |
@@ -1312,14 +1392,8 @@ static void update_and_free_page(struct hstate *h, struct page *page)
 	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
 	set_page_refcounted(page);
 	if (hstate_is_gigantic(h)) {
-		/*
-		 * Temporarily drop the hugetlb_lock, because
-		 * we might block in free_gigantic_page().
-		 */
-		spin_unlock(&hugetlb_lock);
 		destroy_compound_gigantic_page(page, huge_page_order(h));
-		free_gigantic_page(page, huge_page_order(h));
-		spin_lock(&hugetlb_lock);
+		free_gigantic_page(h, page);
 	} else {
 		__free_pages(page, huge_page_order(h));
 	}
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 2c997b5de3b6..af42fad1f131 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -124,11 +124,6 @@
 	 (__boundary - 1 < (end) - 1) ? __boundary : (end);		 \
 })
 
-static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
-{
-	return h->nr_free_vmemmap_pages;
-}
-
 static inline unsigned int vmemmap_pages_per_hpage(struct hstate *h)
 {
 	return free_vmemmap_pages_per_hpage(h) + RESERVE_VMEMMAP_NR;
diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h
index 67113b67495f..293897b9f1d8 100644
--- a/mm/hugetlb_vmemmap.h
+++ b/mm/hugetlb_vmemmap.h
@@ -13,6 +13,11 @@
 #ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
 void __init hugetlb_vmemmap_init(struct hstate *h);
 void free_huge_page_vmemmap(struct hstate *h, struct page *head);
+
+static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
+{
+	return h->nr_free_vmemmap_pages;
+}
 #else
 static inline void hugetlb_vmemmap_init(struct hstate *h)
 {
@@ -21,5 +26,10 @@ static inline void hugetlb_vmemmap_init(struct hstate *h)
 static inline void free_huge_page_vmemmap(struct hstate *h, struct page *head)
 {
 }
+
+static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
+{
+	return 0;
+}
 #endif /* CONFIG_HUGETLB_PAGE_FREE_VMEMMAP */
 #endif /* _LINUX_HUGETLB_VMEMMAP_H */
-- 
2.11.0