On 2025/3/3 21:45, David Hildenbrand wrote:
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2135,6 +2135,8 @@ int dissolve_free_hugetlb_folio(struct folio *folio)
if (!folio_ref_count(folio)) {
struct hstate *h = folio_hstate(folio);
+ bool adjust_surplus = false;
+
if (!available_huge_pages(h))
goto out;
@@ -2157,7 +2159,9 @@ int dissolve_free_hugetlb_folio(struct folio *folio)
goto retry;
}
- remove_hugetlb_folio(h, folio, false);
+ if (h->surplus_huge_pages_node[folio_nid(folio)])
+ adjust_surplus = true;
This change looks good to me.
+ remove_hugetlb_folio(h, folio, adjust_surplus);
h->max_huge_pages--;
spin_unlock_irq(&hugetlb_lock);
@@ -2177,7 +2181,7 @@ int dissolve_free_hugetlb_folio(struct folio *folio)
rc = hugetlb_vmemmap_restore_folio(h, folio);
if (rc) {
spin_lock_irq(&hugetlb_lock);
- add_hugetlb_folio(h, folio, false);
+ add_hugetlb_folio(h, folio, adjust_surplus);
I'm not quite sure here, though. We dropped the hugetlb_lock; can't
some weird concurrent action result in us no longer having to adjust the
surplus pages?
In that case, we would end up with a free surplus folio without a reservation.

The existing code has similar logic: in free_huge_folio(), when
h->surplus_huge_pages_node[nid] != 0,
update_and_free_hugetlb_folio() -> __update_and_free_hugetlb_folio()
may likewise fail because hugetlb_vmemmap_restore_folio() fails, and in that
case the folio is put back and treated as a surplus folio.
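
For reference, the error path I mean looks roughly like this (paraphrased
from my reading of __update_and_free_hugetlb_folio() in mm/hugetlb.c,
trimmed for brevity, not a verbatim copy):

static void __update_and_free_hugetlb_folio(struct hstate *h,
					    struct folio *folio)
{
	...
	/* Paraphrased excerpt; see __update_and_free_hugetlb_folio() upstream. */
	if (hugetlb_vmemmap_restore_folio(h, folio)) {
		spin_lock_irq(&hugetlb_lock);
		/*
		 * If we cannot allocate vmemmap pages, refuse to free the
		 * page, put it back on the hugetlb free list and treat it
		 * as a surplus page.
		 */
		add_hugetlb_folio(h, folio, true);
		spin_unlock_irq(&hugetlb_lock);
		return;
	}
	...
}

So a vmemmap restore failure can already turn a freed folio into a surplus
folio today, which is why I think passing adjust_surplus here stays
consistent with the existing behaviour.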