On 14/08/2024 12:23, Barry Song wrote:
> On Wed, Aug 14, 2024 at 11:20 PM Usama Arif <usamaarif642@xxxxxxxxx> wrote:
>>
>>
>>
>> On 14/08/2024 12:10, Barry Song wrote:
>>> On Wed, Aug 14, 2024 at 12:03 AM Usama Arif <usamaarif642@xxxxxxxxx> wrote:
>>>>
>>>> Currently folio->_deferred_list is used to keep track of
>>>> partially_mapped folios that are going to be split under memory
>>>> pressure. In the next patch, all THPs that are faulted in and collapsed
>>>> by khugepaged are also going to be tracked using _deferred_list.
>>>>
>>>> This patch introduces a pageflag to distinguish between partially
>>>> mapped folios and others in the deferred_list at split time in
>>>> deferred_split_scan. It's needed as __folio_remove_rmap decrements
>>>> _mapcount, _large_mapcount and _entire_mapcount, hence it won't be
>>>> possible to distinguish between partially mapped folios and others in
>>>> deferred_split_scan.
>>>>
>>>> Even though it introduces an extra flag to track if the folio is
>>>> partially mapped, there is no functional change intended with this
>>>> patch and the flag is not useful in this patch itself; it will
>>>> become useful in the next patch when _deferred_list has non-partially
>>>> mapped folios.
>>>>
>>>> Signed-off-by: Usama Arif <usamaarif642@xxxxxxxxx>
>>>> ---
>>>>  include/linux/huge_mm.h    |  4 ++--
>>>>  include/linux/page-flags.h |  3 +++
>>>>  mm/huge_memory.c           | 21 +++++++++++++--------
>>>>  mm/hugetlb.c               |  1 +
>>>>  mm/internal.h              |  4 +++-
>>>>  mm/memcontrol.c            |  3 ++-
>>>>  mm/migrate.c               |  3 ++-
>>>>  mm/page_alloc.c            |  5 +++--
>>>>  mm/rmap.c                  |  3 ++-
>>>>  mm/vmscan.c                |  3 ++-
>>>>  10 files changed, 33 insertions(+), 17 deletions(-)
>>>>
>>>> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
>>>> index 4c32058cacfe..969f11f360d2 100644
>>>> --- a/include/linux/huge_mm.h
>>>> +++ b/include/linux/huge_mm.h
>>>> @@ -321,7 +321,7 @@ static inline int split_huge_page(struct page *page)
>>>>  {
>>>>         return split_huge_page_to_list_to_order(page, NULL, 0);
>>>>  }
>>>> -void deferred_split_folio(struct folio *folio);
>>>> +void deferred_split_folio(struct folio *folio, bool partially_mapped);
>>>>
>>>>  void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
>>>>                 unsigned long address, bool freeze, struct folio *folio);
>>>> @@ -495,7 +495,7 @@ static inline int split_huge_page(struct page *page)
>>>>  {
>>>>         return 0;
>>>>  }
>>>> -static inline void deferred_split_folio(struct folio *folio) {}
>>>> +static inline void deferred_split_folio(struct folio *folio, bool partially_mapped) {}
>>>>  #define split_huge_pmd(__vma, __pmd, __address) \
>>>>         do { } while (0)
>>>>
>>>> diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
>>>> index a0a29bd092f8..cecc1bad7910 100644
>>>> --- a/include/linux/page-flags.h
>>>> +++ b/include/linux/page-flags.h
>>>> @@ -182,6 +182,7 @@ enum pageflags {
>>>>         /* At least one page in this folio has the hwpoison flag set */
>>>>         PG_has_hwpoisoned = PG_active,
>>>>         PG_large_rmappable = PG_workingset, /* anon or file-backed */
>>>> +       PG_partially_mapped, /* was identified to be partially mapped */
>>>>  };
>>>>
>>>>  #define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1)
>>>> @@ -861,8 +862,10 @@ static inline void ClearPageCompound(struct page *page)
>>>>         ClearPageHead(page);
>>>>  }
>>>>  FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE)
>>>> +FOLIO_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
>>>>  #else
>>>>  FOLIO_FLAG_FALSE(large_rmappable)
>>>> +FOLIO_FLAG_FALSE(partially_mapped)
>>>>  #endif
>>>>
>>>>  #define PG_head_mask ((1UL << PG_head))
>>>> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
>>>> index 6df0e9f4f56c..c024ab0f745c 100644
>>>> --- a/mm/huge_memory.c
>>>> +++ b/mm/huge_memory.c
>>>> @@ -3397,6 +3397,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
>>>>                  * page_deferred_list.
>>>>                  */
>>>>                 list_del_init(&folio->_deferred_list);
>>>> +               folio_clear_partially_mapped(folio);
>>>>         }
>>>>         spin_unlock(&ds_queue->split_queue_lock);
>>>>         if (mapping) {
>>>> @@ -3453,11 +3454,12 @@ void __folio_undo_large_rmappable(struct folio *folio)
>>>>         if (!list_empty(&folio->_deferred_list)) {
>>>>                 ds_queue->split_queue_len--;
>>>>                 list_del_init(&folio->_deferred_list);
>>>> +               folio_clear_partially_mapped(folio);
>>>>         }
>>>>         spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
>>>>  }
>>>>
>>>> -void deferred_split_folio(struct folio *folio)
>>>> +void deferred_split_folio(struct folio *folio, bool partially_mapped)
>>>>  {
>>>>         struct deferred_split *ds_queue = get_deferred_split_queue(folio);
>>>>  #ifdef CONFIG_MEMCG
>>>> @@ -3485,14 +3487,17 @@ void deferred_split_folio(struct folio *folio)
>>>>         if (folio_test_swapcache(folio))
>>>>                 return;
>>>>
>>>> -       if (!list_empty(&folio->_deferred_list))
>>>> -               return;
>>>> -
>>>>         spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
>>>> +       if (partially_mapped)
>>>> +               folio_set_partially_mapped(folio);
>>>> +       else
>>>> +               folio_clear_partially_mapped(folio);
>>>>         if (list_empty(&folio->_deferred_list)) {
>>>> -               if (folio_test_pmd_mappable(folio))
>>>> -                       count_vm_event(THP_DEFERRED_SPLIT_PAGE);
>>>> -               count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
>>>> +               if (partially_mapped) {
>>>> +                       if (folio_test_pmd_mappable(folio))
>>>> +                               count_vm_event(THP_DEFERRED_SPLIT_PAGE);
>>>> +                       count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
>>>
>>> This code completely broke MTHP_STAT_SPLIT_DEFERRED for PMD_ORDER. It
>>> added the folio to the deferred_list as entirely_mapped
>>> (partially_mapped == false).
>>> However, when partially_mapped becomes true, there's no opportunity to
>>> add it again, as it is already on the list. Are you consistently seeing
>>> the counter for PMD_ORDER as 0?
>>>
>>
>> Ah I see it, this should fix it?
>>
>> -void deferred_split_folio(struct folio *folio)
>> +/* partially_mapped=false won't clear PG_partially_mapped folio flag */
>> +void deferred_split_folio(struct folio *folio, bool partially_mapped)
>>  {
>>         struct deferred_split *ds_queue = get_deferred_split_queue(folio);
>>  #ifdef CONFIG_MEMCG
>> @@ -3485,14 +3488,14 @@ void deferred_split_folio(struct folio *folio)
>>         if (folio_test_swapcache(folio))
>>                 return;
>>
>> -       if (!list_empty(&folio->_deferred_list))
>> -               return;
>> -
>>         spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
>> -       if (list_empty(&folio->_deferred_list)) {
>> +       if (partially_mapped) {
>> +               folio_set_partially_mapped(folio);
>>                 if (folio_test_pmd_mappable(folio))
>>                         count_vm_event(THP_DEFERRED_SPLIT_PAGE);
>>                 count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
>> +       }
>> +       if (list_empty(&folio->_deferred_list)) {
>>                 list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
>>                 ds_queue->split_queue_len++;
>>  #ifdef CONFIG_MEMCG
>>
>
> not enough, as deferred_split_folio(true) won't be called if the folio is
> already on the deferred_list in __folio_remove_rmap():
>
>         if (partially_mapped && folio_test_anon(folio) &&
>             list_empty(&folio->_deferred_list))
>                 deferred_split_folio(folio, true);
>
> so you will still see 0.
>
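To make the failure mode concrete, here is a minimal userspace C sketch of the
sequence described above. struct folio, deferred_split_folio() and
folio_remove_rmap_partial() below are simplified stand-ins for the kernel code
(not the real API), and mthp_stat_split_deferred stands in for the
MTHP_STAT_SPLIT_DEFERRED counter; only the two guards are modelled.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the MTHP_STAT_SPLIT_DEFERRED counter. */
static int mthp_stat_split_deferred;

/* Simplified stand-in for a folio: only the two bits of state that matter. */
struct folio {
        bool on_deferred_list;  /* models !list_empty(&folio->_deferred_list) */
        bool partially_mapped;  /* models PG_partially_mapped */
};

/* v3 behaviour: the counter is only bumped when the folio is first queued. */
static void deferred_split_folio(struct folio *folio, bool partially_mapped)
{
        folio->partially_mapped = partially_mapped;
        if (!folio->on_deferred_list) {
                if (partially_mapped)
                        mthp_stat_split_deferred++;
                folio->on_deferred_list = true;
        }
}

/* v3 rmap-side guard: skip the call if the folio is already queued. */
static void folio_remove_rmap_partial(struct folio *folio)
{
        if (!folio->on_deferred_list)
                deferred_split_folio(folio, true);
}

int main(void)
{
        struct folio f = { false, false };

        /* Fault/khugepaged path queues the folio as entirely mapped... */
        deferred_split_folio(&f, false);
        /* ...so the later partial unmap never reaches the counter. */
        folio_remove_rmap_partial(&f);

        printf("MTHP_STAT_SPLIT_DEFERRED = %d\n", mthp_stat_split_deferred);
        return 0;
}

Compiled and run, this prints MTHP_STAT_SPLIT_DEFERRED = 0 even though the
folio did become partially mapped, which is the stuck counter Barry points out.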
ah yes, Thanks.

So the below diff over the current v3 series should work for all cases:

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b4d72479330d..482e3ab60911 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3483,6 +3483,7 @@ void __folio_undo_large_rmappable(struct folio *folio)
        spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
 }

+/* partially_mapped=false won't clear PG_partially_mapped folio flag */
 void deferred_split_folio(struct folio *folio, bool partially_mapped)
 {
        struct deferred_split *ds_queue = get_deferred_split_queue(folio);
@@ -3515,16 +3516,16 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped)
                return;

        spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
-       if (partially_mapped)
+       if (partially_mapped) {
                folio_set_partially_mapped(folio);
-       else
-               folio_clear_partially_mapped(folio);
+               if (folio_test_pmd_mappable(folio))
+                       count_vm_event(THP_DEFERRED_SPLIT_PAGE);
+               count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
+       } else {
+               /* partially mapped folios cannot become partially unmapped */
+               VM_WARN_ON_FOLIO(folio_test_partially_mapped(folio), folio);
+       }
        if (list_empty(&folio->_deferred_list)) {
-               if (partially_mapped) {
-                       if (folio_test_pmd_mappable(folio))
-                               count_vm_event(THP_DEFERRED_SPLIT_PAGE);
-                       count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
-               }
                list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
                ds_queue->split_queue_len++;
 #ifdef CONFIG_MEMCG
diff --git a/mm/rmap.c b/mm/rmap.c
index 9ad558c2bad0..4c330635aa4e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1578,7 +1578,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
         * Check partially_mapped first to ensure it is a large folio.
         */
        if (partially_mapped && folio_test_anon(folio) &&
-           list_empty(&folio->_deferred_list))
+           !folio_test_partially_mapped(folio))
                deferred_split_folio(folio, true);

        __folio_mod_stat(folio, -nr, -nr_pmdmapped);
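For comparison, here is the earlier sketch with the logic of the above diff
applied: the rmap-side guard tests the (now sticky) partially_mapped flag
instead of list membership, and the counting happens on every transition to
partially mapped. As before, these are simplified stand-ins rather than the
real kernel API.

#include <stdbool.h>
#include <stdio.h>

static int mthp_stat_split_deferred;

struct folio {
        bool on_deferred_list;
        bool partially_mapped;
};

/* Fixed behaviour: count under the partially_mapped branch; the flag is
 * set there and never cleared (partially_mapped=false leaves it alone). */
static void deferred_split_folio(struct folio *folio, bool partially_mapped)
{
        if (partially_mapped) {
                folio->partially_mapped = true;
                mthp_stat_split_deferred++;
        }
        if (!folio->on_deferred_list)
                folio->on_deferred_list = true;
}

/* Fixed rmap-side guard: test the flag, not list membership. */
static void folio_remove_rmap_partial(struct folio *folio)
{
        if (!folio->partially_mapped)
                deferred_split_folio(folio, true);
}

int main(void)
{
        struct folio f = { false, false };

        deferred_split_folio(&f, false);  /* queued as entirely mapped */
        folio_remove_rmap_partial(&f);    /* first partial unmap: counted */
        folio_remove_rmap_partial(&f);    /* later unmaps: flag already set */

        printf("MTHP_STAT_SPLIT_DEFERRED = %d\n", mthp_stat_split_deferred);
        return 0;
}

This prints MTHP_STAT_SPLIT_DEFERRED = 1: the counter is bumped exactly once,
when the folio first becomes partially mapped, regardless of whether it was
already on the deferred list.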