Now that cma_release is non-blocking and irq safe, there is no need to
drop hugetlb_lock before calling it.

Signed-off-by: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
Acked-by: Roman Gushchin <guro@xxxxxx>
Acked-by: Michal Hocko <mhocko@xxxxxxxx>
---
 mm/hugetlb.c | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3c3e4baa4156..1d62f0492e7b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1353,14 +1353,8 @@ static void update_and_free_page(struct hstate *h, struct page *page)
 	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
 	set_page_refcounted(page);
 	if (hstate_is_gigantic(h)) {
-		/*
-		 * Temporarily drop the hugetlb_lock, because
-		 * we might block in free_gigantic_page().
-		 */
-		spin_unlock(&hugetlb_lock);
 		destroy_compound_gigantic_page(page, huge_page_order(h));
 		free_gigantic_page(page, huge_page_order(h));
-		spin_lock(&hugetlb_lock);
 	} else {
 		__free_pages(page, huge_page_order(h));
 	}
--
2.30.2
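
For readers following along, the unlock/relock dance being removed can be
shown with a minimal userspace C sketch. This is not the kernel code: a
pthread spinlock stands in for hugetlb_lock, and the names free_resource()
and release_nonblocking() are invented stand-ins for the freeing path and
cma_release(). It only illustrates why a blocking callee once forced the
lock to be dropped, and why a non-blocking one lets the lock stay held.

	/*
	 * Userspace analogue of the locking change. Build with:
	 *   cc -o sketch sketch.c -pthread
	 */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_spinlock_t lock;	/* stands in for hugetlb_lock */

	/*
	 * Stand-in for cma_release(): it never sleeps, so it is safe
	 * to call while the spinlock is held.
	 */
	static void release_nonblocking(int id)
	{
		printf("released resource %d\n", id);
	}

	static void free_resource(int id)
	{
		pthread_spin_lock(&lock);
		/*
		 * When the release path could block, the caller had to:
		 *   pthread_spin_unlock(&lock);
		 *   release_blocking(id);
		 *   pthread_spin_lock(&lock);
		 * With a non-blocking release, the lock stays held and
		 * the critical section is never reopened.
		 */
		release_nonblocking(id);
		pthread_spin_unlock(&lock);
	}

	int main(void)
	{
		pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
		free_resource(42);
		pthread_spin_destroy(&lock);
		return 0;
	}

Keeping the lock held also removes the window in which another thread
could observe hstate counters between the unlock and the relock, which is
the same benefit the patch above gains for hugetlb_lock.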