When a hugetlb page allocated via cma_alloc is freed via cma_release, the cma_release call may sleep. For now, only gigantic pages can be allocated via cma_alloc. The routine free_huge_page cannot sleep, so it defers freeing of all gigantic pages to a workqueue. Use a new hugetlb page specific flag HPageCma to indicate the page was allocated via cma_alloc. This flag can be used so that only gigantic pages allocated via cma_alloc will have deferred freeing. Signed-off-by: Mike Kravetz <mike.kravetz@xxxxxxxxxx> --- include/linux/hugetlb.h | 7 +++++++ mm/hugetlb.c | 18 ++++++++++-------- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index a81ca39c06be..0aba6957a73a 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -498,12 +498,18 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, * modifications require hugetlb_lock. * HPG_freed - Set when page is on the free lists. * Synchronization: hugetlb_lock held for examination and modification. + * HPG_cma - Set if huge page was directly allocated from CMA area via + * cma_alloc. Initially set for gigantic page cma allocations, but can + * be set in non-gigantic pages if gigantic pages are demoted. + * Synchronization: Only accessed or modified when there is only one + * reference to the page at allocation or free time. 
*/ enum hugetlb_page_flags { HPG_restore_reserve = 0, HPG_migratable, HPG_temporary, HPG_freed, + HPG_cma, __NR_HPAGEFLAGS, }; @@ -549,6 +555,7 @@ HPAGEFLAG(RestoreReserve, restore_reserve) HPAGEFLAG(Migratable, migratable) HPAGEFLAG(Temporary, temporary) HPAGEFLAG(Freed, freed) +HPAGEFLAG(Cma, cma) #ifdef CONFIG_HUGETLB_PAGE diff --git a/mm/hugetlb.c b/mm/hugetlb.c index b8304b290a73..5efff5ce337f 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1241,13 +1241,11 @@ static void destroy_compound_gigantic_page(struct page *page, static void free_gigantic_page(struct page *page, unsigned int order) { - /* - * If the page isn't allocated using the cma allocator, - * cma_release() returns false. - */ #ifdef CONFIG_CMA - if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order)) + if (HPageCma(page)) { + cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order); return; + } #endif free_contig_range(page_to_pfn(page), 1 << order); @@ -1269,8 +1267,10 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, if (hugetlb_cma[nid]) { page = cma_alloc(hugetlb_cma[nid], nr_pages, huge_page_order(h), true); - if (page) + if (page) { + SetHPageCma(page); return page; + } } if (!(gfp_mask & __GFP_THISNODE)) { @@ -1280,8 +1280,10 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, page = cma_alloc(hugetlb_cma[node], nr_pages, huge_page_order(h), true); - if (page) + if (page) { + SetHPageCma(page); return page; + } } } } @@ -1397,7 +1399,7 @@ static DECLARE_WORK(free_hpage_work, free_hpage_workfn); static bool free_page_may_sleep(struct hstate *h, struct page *page) { /* freeing gigantic pages in CMA may sleep */ - if (hstate_is_gigantic(h)) + if (HPageCma(page)) return true; return false; -- 2.30.2