The patch titled
     Subject: mm/hugetlb: better checks before using hugetlb_cma
has been removed from the -mm tree.  Its filename was
     mm-hugetlb-avoid-hardcoding-while-checking-if-cma-is-enabled-fix-fix.patch

This patch was dropped because it was folded into mm-hugetlb-avoid-hardcoding-while-checking-if-cma-is-enabled.patch

------------------------------------------------------
From: Stephen Rothwell <sfr@xxxxxxxxxxxxxxxx>
Subject: mm/hugetlb: better checks before using hugetlb_cma

Link: http://lkml.kernel.org/r/20200721205716.6dbaa56b@xxxxxxxxxxxxxxxx
Signed-off-by: Stephen Rothwell <sfr@xxxxxxxxxxxxxxxx>
Acked-by: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/hugetlb.c |    9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

--- a/mm/hugetlb.c~mm-hugetlb-avoid-hardcoding-while-checking-if-cma-is-enabled-fix-fix
+++ a/mm/hugetlb.c
@@ -1238,9 +1238,10 @@ static void free_gigantic_page(struct pa
 	 * If the page isn't allocated using the cma allocator,
 	 * cma_release() returns false.
 	 */
-	if (IS_ENABLED(CONFIG_CMA) &&
-	    cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
+#ifdef CONFIG_CMA
+	if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
 		return;
+#endif
 
 	free_contig_range(page_to_pfn(page), 1 << order);
 }
@@ -1251,7 +1252,8 @@ static struct page *alloc_gigantic_page(
 {
 	unsigned long nr_pages = 1UL << huge_page_order(h);
 
-	if (IS_ENABLED(CONFIG_CMA)) {
+#ifdef CONFIG_CMA
+	{
 		struct page *page;
 		int node;
 
@@ -1265,6 +1267,7 @@ static struct page *alloc_gigantic_page(
 			return page;
 		}
 	}
+#endif
 
 	return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
 }
_

Patches currently in -mm which might be from sfr@xxxxxxxxxxxxxxxx are

mm-hugetlb-avoid-hardcoding-while-checking-if-cma-is-enabled.patch
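
[Editorial note, not part of the original mail: the likely reason #ifdef CONFIG_CMA
is preferred over IS_ENABLED(CONFIG_CMA) here is that hugetlb_cma[] itself is only
defined when CONFIG_CMA is set, so code guarded only by IS_ENABLED() still references
a symbol that does not exist on CONFIG_CMA=n builds.  A minimal sketch of the
difference, using a hypothetical my_cma[] array and release function, is below.]

	#include <linux/cma.h>
	#include <linux/gfp.h>
	#include <linux/mm.h>

	#ifdef CONFIG_CMA
	/* Like hugetlb_cma[], this array only exists when CMA is built in. */
	static struct cma *my_cma[MAX_NUMNODES];
	#endif

	static void my_release(struct page *page, unsigned int order)
	{
		/*
		 * With IS_ENABLED(CONFIG_CMA) the branch is dead code when CMA is
		 * disabled, but the compiler still has to parse the reference to
		 * my_cma[], which is not defined in that configuration:
		 *
		 *	if (IS_ENABLED(CONFIG_CMA) &&
		 *	    cma_release(my_cma[page_to_nid(page)], page, 1 << order))
		 *		return;
		 */
	#ifdef CONFIG_CMA
		/* The preprocessor removes this block entirely on CONFIG_CMA=n. */
		if (cma_release(my_cma[page_to_nid(page)], page, 1 << order))
			return;
	#endif
		free_contig_range(page_to_pfn(page), 1 << order);
	}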