Re: [PATCH v4 3/3] mm/vmscan: avoid split lazyfree THP during shrink_folio_list()

On 1 May 2024, at 0:27, Lance Yang wrote:

> When the user no longer requires the pages, they can use
> madvise(MADV_FREE) to mark the pages as lazy free. Subsequently, they
> typically would not write to that memory again.
>
> During memory reclaim, if we detect that the large folio and its PMD
> are both still marked as clean and there are no unexpected references
> (such as GUP), we can just discard the memory lazily, improving the
> efficiency of memory reclamation in this case.
>
> On an Intel i5 CPU, reclaiming 1GiB of lazyfree THPs using
> mem_cgroup_force_empty() results in the following runtimes in seconds
> (shorter is better):
>
> ---------------------------------------------
> |     Old       |      New       |  Change  |
> ---------------------------------------------
> |   0.683426    |    0.049197    |  -92.80% |
> ---------------------------------------------
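
For context, the lazyfree pattern being optimized here looks roughly
like the userspace sketch below. This is my own illustration, not part
of the patch; PMD_SIZE is assumed to be 2MiB (x86-64), and the manual
alignment is only there because mmap() does not guarantee a PMD-aligned
address:

	#define _GNU_SOURCE
	#include <string.h>
	#include <sys/mman.h>

	#define PMD_SIZE (2UL << 20)	/* assumption: 2MiB PMD-sized THPs */

	int main(void)
	{
		/* Over-map so a PMD-aligned, PMD-sized chunk can be carved out. */
		char *raw = mmap(NULL, 2 * PMD_SIZE, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (raw == MAP_FAILED)
			return 1;
		char *buf = (char *)(((unsigned long)raw + PMD_SIZE - 1)
				     & ~(PMD_SIZE - 1));

		madvise(buf, PMD_SIZE, MADV_HUGEPAGE);	/* request THP backing */
		memset(buf, 1, PMD_SIZE);		/* fault the folio in */

		/*
		 * Done with the data: mark it lazy free. Reclaim may now
		 * discard the clean THP without any swap I/O, unless it is
		 * redirtied first.
		 */
		madvise(buf, PMD_SIZE, MADV_FREE);
		return 0;
	}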
>
> Suggested-by: Zi Yan <ziy@xxxxxxxxxx>
> Suggested-by: David Hildenbrand <david@xxxxxxxxxx>
> Signed-off-by: Lance Yang <ioworker0@xxxxxxxxx>
> ---
>  include/linux/huge_mm.h |  9 +++++
>  mm/huge_memory.c        | 73 +++++++++++++++++++++++++++++++++++++++++
>  mm/rmap.c               |  3 ++
>  3 files changed, 85 insertions(+)
>
> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> index 38c4b5537715..017cee864080 100644
> --- a/include/linux/huge_mm.h
> +++ b/include/linux/huge_mm.h
> @@ -411,6 +411,8 @@ static inline bool thp_migration_supported(void)
>
>  void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
>  			   pmd_t *pmd, bool freeze, struct folio *folio);
> +bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
> +			   pmd_t *pmdp, struct folio *folio);
>
>  static inline void align_huge_pmd_range(struct vm_area_struct *vma,
>  					unsigned long *start,
> @@ -492,6 +494,13 @@ static inline void align_huge_pmd_range(struct vm_area_struct *vma,
>  					unsigned long *start,
>  					unsigned long *end) {}
>
> +static inline bool unmap_huge_pmd_locked(struct vm_area_struct *vma,
> +					 unsigned long addr, pmd_t *pmdp,
> +					 struct folio *folio)
> +{
> +	return false;
> +}
> +
>  #define split_huge_pud(__vma, __pmd, __address)	\
>  	do { } while (0)
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 145505a1dd05..90fdef847a88 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2690,6 +2690,79 @@ static void unmap_folio(struct folio *folio)
>  	try_to_unmap_flush();
>  }
>
> +static bool __discard_trans_pmd_locked(struct vm_area_struct *vma,
> +				       unsigned long addr, pmd_t *pmdp,
> +				       struct folio *folio)
> +{
> +	struct mm_struct *mm = vma->vm_mm;
> +	int ref_count, map_count;
> +	pmd_t orig_pmd = *pmdp;
> +	struct mmu_gather tlb;
> +	struct page *page;
> +
> +	if (pmd_dirty(orig_pmd) || folio_test_dirty(folio))
> +		return false;
> +	if (unlikely(!pmd_present(orig_pmd) || !pmd_trans_huge(orig_pmd)))
> +		return false;
> +
> +	page = pmd_page(orig_pmd);
> +	if (unlikely(page_folio(page) != folio))
> +		return false;
> +
> +	tlb_gather_mmu(&tlb, mm);
> +	orig_pmd = pmdp_huge_get_and_clear(mm, addr, pmdp);
> +	tlb_remove_pmd_tlb_entry(&tlb, pmdp, addr);
> +
> +	/*
> +	 * Syncing against concurrent GUP-fast:
> +	 * - clear PMD; barrier; read refcount
> +	 * - inc refcount; barrier; read PMD
> +	 */
> +	smp_mb();
> +
> +	ref_count = folio_ref_count(folio);
> +	map_count = folio_mapcount(folio);
> +
> +	/*
> +	 * Order reads for folio refcount and dirty flag
> +	 * (see comments in __remove_mapping()).
> +	 */
> +	smp_rmb();
> +
> +	/*
> +	 * If the PMD or folio is redirtied at this point, or if there are
> +	 * unexpected references, we will give up on discarding this
> +	 * folio and remap it.
> +	 *
> +	 * The only folio refs must be one from isolation plus the rmap(s).
> +	 */
> +	if (ref_count != map_count + 1 || folio_test_dirty(folio) ||
> +	    pmd_dirty(orig_pmd)) {
> +		set_pmd_at(mm, addr, pmdp, orig_pmd);
> +		return false;
> +	}
> +
> +	folio_remove_rmap_pmd(folio, page, vma);
> +	zap_deposited_table(mm, pmdp);
> +	add_mm_counter(mm, MM_ANONPAGES, -HPAGE_PMD_NR);
> +	folio_put(folio);
> +
> +	return true;
> +}
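
For readers tracing the ordering: the smp_mb() after the PMD clear
above pairs with the GUP-fast side, which takes its folio reference
first and then re-reads the PMD. A simplified sketch of that side
(paraphrasing the mm/gup.c pattern, not verbatim):

	/* GUP-fast (simplified): inc refcount; barrier; re-read PMD */
	if (unlikely(!folio_try_get(folio)))
		return 0;
	/* The atomic refcount bump orders against the PMD clear. */
	if (unlikely(pmd_val(orig_pmd) != pmd_val(*pmdp))) {
		folio_put(folio);	/* lost the race: back off */
		return 0;
	}

This pairing is what makes the ref_count != map_count + 1 check above
sufficient to detect a concurrent GUP.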
> +
> +bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
> +			   pmd_t *pmdp, struct folio *folio)
> +{
> +	VM_WARN_ON_FOLIO(!folio_test_pmd_mappable(folio), folio);
> +	VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
> +	VM_WARN_ON_ONCE(!IS_ALIGNED(addr, HPAGE_PMD_SIZE));
> +
> +	if (folio_test_anon(folio) && !folio_test_swapbacked(folio))
> +		return __discard_trans_pmd_locked(vma, addr, pmdp, folio);
> +
> +	return false;
> +}
> +
>  static void remap_page(struct folio *folio, unsigned long nr)
>  {
>  	int i = 0;
> diff --git a/mm/rmap.c b/mm/rmap.c
> index 432601154583..1d3d30cb752c 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1675,6 +1675,9 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
>  		}
>
>  		if (!pvmw.pte && (flags & TTU_SPLIT_HUGE_PMD)) {
> +			if (unmap_huge_pmd_locked(vma, range.start, pvmw.pmd,
> +						  folio))
> +				goto walk_done;

You might not need to check (flags & TTU_SPLIT_HUGE_PMD) for
unmap_huge_pmd_locked(), since you are unmapping a PMD here.
TTU_SPLIT_HUGE_PMD is here because try_to_unmap_one() was not able to
unmap a PMD. You probably can remove it for the callers that are
unmapping the folio, but not for the ones that are swapping.
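
Something like this, perhaps (untested, just to illustrate the idea):

	if (!pvmw.pte) {
		/* Try the PMD-level unmap regardless of the flag. */
		if (unmap_huge_pmd_locked(vma, range.start, pvmw.pmd,
					  folio))
			goto walk_done;
		if (flags & TTU_SPLIT_HUGE_PMD) {
			/* fall back to the existing split path ... */
		}
	}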



>  			/*
>  			 * We temporarily have to drop the PTL and start once
>  			 * again from that now-PTE-mapped page table.
> -- 
> 2.33.1


--
Best Regards,
Yan, Zi
