On 14/08/2024 12:20, Barry Song wrote:
> On Wed, Aug 14, 2024 at 11:11 PM Usama Arif <usamaarif642@xxxxxxxxx> wrote:
>>
>>
>>
>> On 14/08/2024 11:44, Barry Song wrote:
>>> On Wed, Aug 14, 2024 at 12:03 AM Usama Arif <usamaarif642@xxxxxxxxx> wrote:
>>>>
>>>> Currently folio->_deferred_list is used to keep track of
>>>> partially_mapped folios that are going to be split under memory
>>>> pressure. In the next patch, all THPs that are faulted in and collapsed
>>>> by khugepaged will also be tracked using _deferred_list.
>>>>
>>>> This patch introduces a pageflag to distinguish between partially
>>>> mapped folios and others in the deferred_list at split time in
>>>> deferred_split_scan. It's needed because __folio_remove_rmap decrements
>>>> _mapcount, _large_mapcount and _entire_mapcount, so it would otherwise
>>>> not be possible to tell partially mapped folios apart from others in
>>>> deferred_split_scan.
>>>>
>>>> Even though it introduces an extra flag to track whether the folio is
>>>> partially mapped, there is no functional change intended with this
>>>> patch, and the flag is not useful in this patch itself; it will
>>>> become useful in the next patch, when _deferred_list holds
>>>> non-partially-mapped folios as well.
>>>>
>>>> Signed-off-by: Usama Arif <usamaarif642@xxxxxxxxx>
>>>> ---
>>>>  include/linux/huge_mm.h    |  4 ++--
>>>>  include/linux/page-flags.h |  3 +++
>>>>  mm/huge_memory.c           | 21 +++++++++++++--------
>>>>  mm/hugetlb.c               |  1 +
>>>>  mm/internal.h              |  4 +++-
>>>>  mm/memcontrol.c            |  3 ++-
>>>>  mm/migrate.c               |  3 ++-
>>>>  mm/page_alloc.c            |  5 +++--
>>>>  mm/rmap.c                  |  3 ++-
>>>>  mm/vmscan.c                |  3 ++-
>>>>  10 files changed, 33 insertions(+), 17 deletions(-)
>>>>
>>>> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
>>>> index 4c32058cacfe..969f11f360d2 100644
>>>> --- a/include/linux/huge_mm.h
>>>> +++ b/include/linux/huge_mm.h
>>>> @@ -321,7 +321,7 @@ static inline int split_huge_page(struct page *page)
>>>>  {
>>>>         return split_huge_page_to_list_to_order(page, NULL, 0);
>>>>  }
>>>> -void deferred_split_folio(struct folio *folio);
>>>> +void deferred_split_folio(struct folio *folio, bool partially_mapped);
>>>>
>>>>  void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
>>>>                 unsigned long address, bool freeze, struct folio *folio);
>>>> @@ -495,7 +495,7 @@ static inline int split_huge_page(struct page *page)
>>>>  {
>>>>         return 0;
>>>>  }
>>>> -static inline void deferred_split_folio(struct folio *folio) {}
>>>> +static inline void deferred_split_folio(struct folio *folio, bool partially_mapped) {}
>>>>  #define split_huge_pmd(__vma, __pmd, __address) \
>>>>         do { } while (0)
>>>>
>>>> diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
>>>> index a0a29bd092f8..cecc1bad7910 100644
>>>> --- a/include/linux/page-flags.h
>>>> +++ b/include/linux/page-flags.h
>>>> @@ -182,6 +182,7 @@ enum pageflags {
>>>>         /* At least one page in this folio has the hwpoison flag set */
>>>>         PG_has_hwpoisoned = PG_active,
>>>>         PG_large_rmappable = PG_workingset, /* anon or file-backed */
>>>> +       PG_partially_mapped, /* was identified to be partially mapped */
>>>>  };
>>>>
>>>>  #define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1)
>>>> @@ -861,8 +862,10 @@ static inline void ClearPageCompound(struct page *page)
>>>>         ClearPageHead(page);
>>>>  }
>>>>  FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE)
>>>> +FOLIO_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
>>>>  #else
>>>>  FOLIO_FLAG_FALSE(large_rmappable)
>>>> +FOLIO_FLAG_FALSE(partially_mapped)
>>>>  #endif
>>>>
>>>>  #define PG_head_mask ((1UL << PG_head))
>>>> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
>>>> index 6df0e9f4f56c..c024ab0f745c 100644
>>>> --- a/mm/huge_memory.c
>>>> +++ b/mm/huge_memory.c
>>>> @@ -3397,6 +3397,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
>>>>                  * page_deferred_list.
>>>>                  */
>>>>                 list_del_init(&folio->_deferred_list);
>>>> +               folio_clear_partially_mapped(folio);
>>>>         }
>>>>         spin_unlock(&ds_queue->split_queue_lock);
>>>>         if (mapping) {
>>>> @@ -3453,11 +3454,12 @@ void __folio_undo_large_rmappable(struct folio *folio)
>>>>         if (!list_empty(&folio->_deferred_list)) {
>>>>                 ds_queue->split_queue_len--;
>>>>                 list_del_init(&folio->_deferred_list);
>>>> +               folio_clear_partially_mapped(folio);
>>>>         }
>>>>         spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
>>>>  }
>>>>
>>>> -void deferred_split_folio(struct folio *folio)
>>>> +void deferred_split_folio(struct folio *folio, bool partially_mapped)
>>>>  {
>>>>         struct deferred_split *ds_queue = get_deferred_split_queue(folio);
>>>>  #ifdef CONFIG_MEMCG
>>>> @@ -3485,14 +3487,17 @@ void deferred_split_folio(struct folio *folio)
>>>>         if (folio_test_swapcache(folio))
>>>>                 return;
>>>>
>>>> -       if (!list_empty(&folio->_deferred_list))
>>>> -               return;
>>>> -
>>>>         spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
>>>> +       if (partially_mapped)
>>>> +               folio_set_partially_mapped(folio);
>>>> +       else
>>>> +               folio_clear_partially_mapped(folio);
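[For anyone skimming the thread: FOLIO_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
above just generates the usual accessor trio against the second page's flags
word, conceptually along the lines of the below -- a simplified sketch, not the
literal macro expansion from page-flags.h:

static __always_inline bool folio_test_partially_mapped(struct folio *folio)
{
        /* like PG_large_rmappable, the bit lives on the folio's second page */
        return test_bit(PG_partially_mapped,
                        folio_flags(folio, FOLIO_SECOND_PAGE));
}

static __always_inline void folio_set_partially_mapped(struct folio *folio)
{
        set_bit(PG_partially_mapped, folio_flags(folio, FOLIO_SECOND_PAGE));
}

static __always_inline void folio_clear_partially_mapped(struct folio *folio)
{
        clear_bit(PG_partially_mapped, folio_flags(folio, FOLIO_SECOND_PAGE));
}
]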
>>>
>>> Hi Usama,
>>>
>>> Do we need this? When can a partially_mapped folio on deferred_list
>>> become non-partially-mapped and need a clear? I understand transferring
>>> from entirely_mapped to partially_mapped is a one-way process?
>>> partially_mapped folios can't go back to entirely_mapped?
>>>
>> Hi Barry,
>>
>> The deferred_split_folio function is called in 3 places after this
>> series: at fault, at collapse, and on partial mapping. Partial mapping
>> can only happen after fault/collapse, and we have
>> FOLIO_FLAG_FALSE(partially_mapped), i.e. the flag is initialized to
>> false, so technically it's not needed. A partially_mapped folio on the
>> deferred list won't become non-partially-mapped.
>>
>> I just did it as a precaution in case someone ever changes the kernel
>> and calls deferred_split_folio with partially_mapped set to false after
>> it had been true. The function arguments of deferred_split_folio make
>> it seem that passing partially_mapped=false would clear the flag, which
>> is why I cleared it as well. I could change the patch to something like
>> the below if it makes things better, i.e. add a comment at the top of
>> the function:
>>
>
> To me, it seems a BUG to call with false after a folio has been
> partially_mapped, so I'd rather
> VM_WARN_ON_FOLIO(folio_test_partially_mapped(folio), folio);
>
> The below should also fix the MTHP_STAT_SPLIT_DEFERRED
> counter this patch is breaking?
>
> @@ -3515,16 +3522,18 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped)
>                 return;
>
>         spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
> -       if (partially_mapped)
> -               folio_set_partially_mapped(folio);
> -       else
> -               folio_clear_partially_mapped(folio);
> -
> -       if (list_empty(&folio->_deferred_list)) {
> -               if (partially_mapped) {
> +       if (partially_mapped) {
> +               if (!folio_test_set_partially_mapped(folio)) {
> +                       mod_mthp_stat(folio_order(folio),
> +                                     MTHP_STAT_NR_SPLIT_DEFERRED, 1);
>                         if (folio_test_pmd_mappable(folio))
>                                 count_vm_event(THP_DEFERRED_SPLIT_PAGE);
>                         count_mthp_stat(folio_order(folio),
>                                         MTHP_STAT_SPLIT_DEFERRED);
>                 }
> +       }
> +       VM_WARN_ON_FOLIO(folio_test_partially_mapped(folio), folio);
> +
> +       if (list_empty(&folio->_deferred_list)) {
>                 list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
>                 ds_queue->split_queue_len++;
>  #ifdef CONFIG_MEMCG
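Just to make sure I read folio_test_set_partially_mapped() the same way you
intend it: that would be the atomic test-and-set variant of the accessor,
returning the old bit, so the DEFERRED counters can only be bumped once per
folio even if deferred_split_folio() is called repeatedly with
partially_mapped=true. Something along these lines, assuming we add such a
helper for second-page folio flags (a sketch of the semantics only):

static __always_inline bool folio_test_set_partially_mapped(struct folio *folio)
{
        /* atomically sets the bit and returns its previous value */
        return test_and_set_bit(PG_partially_mapped,
                                folio_flags(folio, FOLIO_SECOND_PAGE));
}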
I had sent the version below, without the VM_WARN_ON_FOLIO, as a reply to the
other email; below is the version with the VM_WARN:

-void deferred_split_folio(struct folio *folio)
+/* partially_mapped=false won't clear PG_partially_mapped folio flag */
+void deferred_split_folio(struct folio *folio, bool partially_mapped)
 {
        struct deferred_split *ds_queue = get_deferred_split_queue(folio);
 #ifdef CONFIG_MEMCG
@@ -3485,14 +3488,17 @@ void deferred_split_folio(struct folio *folio)
        if (folio_test_swapcache(folio))
                return;

-       if (!list_empty(&folio->_deferred_list))
-               return;
-
        spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
-       if (list_empty(&folio->_deferred_list)) {
+       if (partially_mapped) {
+               folio_set_partially_mapped(folio);
                if (folio_test_pmd_mappable(folio))
                        count_vm_event(THP_DEFERRED_SPLIT_PAGE);
                count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
+       } else {
+               /* partially mapped folios cannot become non-partially mapped */
+               VM_WARN_ON_FOLIO(folio_test_partially_mapped(folio), folio);
+       }
+       if (list_empty(&folio->_deferred_list)) {
                list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
                ds_queue->split_queue_len++;
 #ifdef CONFIG_MEMCG

Thanks
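P.S. To spell out the three callers mentioned above, the intended usage after
the full series is roughly as follows (a sketch of intent, not the exact hunks
from the next patch):

        /* mm/rmap.c, __folio_remove_rmap(): the folio just became partially
         * mapped, flag it and queue it so it can be split under pressure */
        deferred_split_folio(folio, true);

        /* THP fault and khugepaged collapse (next patch): track the folio
         * on _deferred_list without marking it partially mapped */
        deferred_split_folio(folio, false);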