Re: [PATCH 2/2] mm/migrate: move common code to numa_migrate_check (was numa_migrate_prep)

On 8 Aug 2024, at 10:59, Zi Yan wrote:

> On 8 Aug 2024, at 0:14, Huang, Ying wrote:
>
>> Zi Yan <ziy@xxxxxxxxxx> writes:
>>
>>> do_numa_page() and do_huge_pmd_numa_page() share a lot of common code. To
>>> reduce redundancy, move common code to numa_migrate_prep() and rename
>>> the function to numa_migrate_check() to reflect its functionality.
>>>
>>> Now do_huge_pmd_numa_page() also checks shared folios to set the
>>> TNF_SHARED flag.
>>>
>>> Suggested-by: David Hildenbrand <david@xxxxxxxxxx>
>>> Signed-off-by: Zi Yan <ziy@xxxxxxxxxx>
>>> ---
>>>  mm/huge_memory.c | 14 ++-------
>>>  mm/internal.h    |  5 ++--
>>>  mm/memory.c      | 76 ++++++++++++++++++++++++------------------------
>>>  3 files changed, 44 insertions(+), 51 deletions(-)
>>>
>>> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
>>> index a3c018f2b554..9b312cae6775 100644
>>> --- a/mm/huge_memory.c
>>> +++ b/mm/huge_memory.c
>>> @@ -1699,18 +1699,10 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
>>>  	if (!folio)
>>>  		goto out_map;
>>>
>>> -	/* See similar comment in do_numa_page for explanation */
>>> -	if (!writable)
>>> -		flags |= TNF_NO_GROUP;
>>> -
>>>  	nid = folio_nid(folio);
>>
>> NITPICK: It appears that we can remove the "nid" local variable.
>
> I thought about it, but:
>
> 1. if the folio is NULL from vm_normal_folio_pmd(), nid remains NUMA_NO_NODE,
> 2. if migration succeeds, nid is changed to target_nid,
> 3. if migration fails, nid is the folio's node id,
>
> and all three cases are consumed by the if (nid != NUMA_NO_NODE) check
> before task_numa_fault().
>
> I will give it a try in the next version and see if I can remove it.
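
(To illustrate, a condensed sketch of those three cases -- locking, the
PMD restore, and the migrate_misplaced_folio_prepare() step are omitted,
so this is not the exact kernel code:)

	int nid = NUMA_NO_NODE;

	folio = vm_normal_folio_pmd(vma, haddr, vmf->orig_pmd);
	if (!folio)
		goto out;		/* case 1: nid stays NUMA_NO_NODE */

	nid = folio_nid(folio);		/* case 3: folio's node, kept on failure */
	target_nid = numa_migrate_check(folio, vmf, haddr, &flags, writable,
					&last_cpupid);
	if (target_nid != NUMA_NO_NODE &&
	    !migrate_misplaced_folio(folio, vma, target_nid))
		nid = target_nid;	/* case 2: migration succeeded */

out:
	if (nid != NUMA_NO_NODE)	/* consumes all three cases */
		task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);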
>
>>
>>> -	/*
>>> -	 * For memory tiering mode, cpupid of slow memory page is used
>>> -	 * to record page access time.  So use default value.
>>> -	 */
>>> -	if (!folio_use_access_time(folio))
>>> -		last_cpupid = folio_last_cpupid(folio);
>>> -	target_nid = numa_migrate_prep(folio, vmf, haddr, nid, &flags);
>>> +
>>> +	target_nid = numa_migrate_check(folio, vmf, haddr, &flags, writable,
>>> +					&last_cpupid);
>>>  	if (target_nid == NUMA_NO_NODE)
>>>  		goto out_map;
>>>  	if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
>>> diff --git a/mm/internal.h b/mm/internal.h
>>> index 52f7fc4e8ac3..fb16e18c9761 100644
>>> --- a/mm/internal.h
>>> +++ b/mm/internal.h
>>> @@ -1191,8 +1191,9 @@ void vunmap_range_noflush(unsigned long start, unsigned long end);
>>>
>>>  void __vunmap_range_noflush(unsigned long start, unsigned long end);
>>>
>>> -int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
>>> -		      unsigned long addr, int page_nid, int *flags);
>>> +int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
>>> +		      unsigned long addr, int *flags, bool writable,
>>> +		      int *last_cpupid);
>>>
>>>  void free_zone_device_folio(struct folio *folio);
>>>  int migrate_device_coherent_page(struct page *page);
>>> diff --git a/mm/memory.c b/mm/memory.c
>>> index 503d493263df..b093df652c11 100644
>>> --- a/mm/memory.c
>>> +++ b/mm/memory.c
>>> @@ -5368,16 +5368,43 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
>>>  	return ret;
>>>  }
>>>
>>> -int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
>>> -		      unsigned long addr, int page_nid, int *flags)
>>> +int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
>>> +		      unsigned long addr, int *flags,
>>> +		      bool writable, int *last_cpupid)
>>>  {
>>>  	struct vm_area_struct *vma = vmf->vma;
>>>
>>> +	/*
>>> +	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
>>> +	 * much anyway since they can be in shared cache state. This misses
>>> +	 * the case where a mapping is writable but the process never writes
>>> +	 * to it but pte_write gets cleared during protection updates and
>>> +	 * pte_dirty has unpredictable behaviour between PTE scan updates,
>>> +	 * background writeback, dirty balancing and application behaviour.
>>> +	 */
>>> +	if (!writable)
>>> +		*flags |= TNF_NO_GROUP;
>>> +
>>> +	/*
>>> +	 * Flag if the folio is shared between multiple address spaces. This
>>> +	 * is later used when determining whether to group tasks together
>>> +	 */
>>> +	if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED))
>>> +		*flags |= TNF_SHARED;
>>> +	/*
>>> +	 * For memory tiering mode, cpupid of slow memory page is used
>>> +	 * to record page access time.  So use default value.
>>> +	 */
>>> +	if (folio_use_access_time(folio))
>>> +		*last_cpupid = (-1 & LAST_CPUPID_MASK);
>>> +	else
>>> +		*last_cpupid = folio_last_cpupid(folio);
>>> +
>>>  	/* Record the current PID accessing VMA */
>>>  	vma_set_access_pid_bit(vma);
>>>
>>>  	count_vm_numa_event(NUMA_HINT_FAULTS);
>>> -	if (page_nid == numa_node_id()) {
>>> +	if (folio_nid(folio) == numa_node_id()) {
>>>  		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
>>>  		*flags |= TNF_FAULT_LOCAL;
>>>  	}
>>> @@ -5442,13 +5469,13 @@ static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_stru
>>>  static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>>  {
>>>  	struct vm_area_struct *vma = vmf->vma;
>>> +	pte_t old_pte = vmf->orig_pte;
>>
>> The usage of old_pte here is different from the usual convention, which is:
>>
>> old_pte = *pte;
>> compare old_pte with orig_pte
>> generate new_pte from old_pte
>> set new_pte
>>
>> We used to follow this in do_numa_page(), but we no longer do.  I still
>> think that it's better to follow the convention where possible; it makes
>> the code easier to read.  I noticed that we don't follow it in
>> do_huge_pmd_numa_page() either -- shall we change that?
>
> Sure, since I am trying to make do_numa_page() and do_huge_pmd_numa_page()
> use the same pattern.
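
(For reference, a minimal sketch of that convention as it could look in
do_numa_page() -- simplified, with the ptep_modify_prot_start()/commit()
machinery left out:)

	spin_lock(vmf->ptl);
	old_pte = ptep_get(vmf->pte);	/* read the live PTE under the lock */
	if (unlikely(!pte_same(old_pte, vmf->orig_pte))) {
		/* raced with a concurrent PTE update, bail out */
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		return 0;
	}
	pte = pte_modify(old_pte, vma->vm_page_prot);	/* new_pte from old_pte */
	pte = pte_mkyoung(pte);
	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);	/* install new_pte */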
>
>>
>>> +	pte_t pte;
>>>  	struct folio *folio = NULL;
>>>  	int nid = NUMA_NO_NODE;
>>>  	bool writable = false, ignore_writable = false;
>>>  	bool pte_write_upgrade = vma_wants_manual_pte_write_upgrade(vma);
>>> -	int last_cpupid;
>>> -	int target_nid;
>>> -	pte_t pte, old_pte;
>>> +	int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
>>
>> Because we will initialize last_cpupid in numa_migrate_check(), we don't
>> need to initialize it here?
>
> Will remove it.
>
> Thanks for the review.
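
(An aside on the default value discussed above: -1 has all bits set, so
(-1 & LAST_CPUPID_MASK) yields the all-ones cpupid, which the NUMA
hinting code treats as "no valid last accessor", e.g.:)

	/*
	 * Default used when the folio's cpupid field records page access
	 * time (memory tiering mode) rather than the last accessor.
	 */
	int last_cpupid = (-1 & LAST_CPUPID_MASK);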

I attached my fixup and updated patch 2.

Let me know how it looks.

--
Best Regards,
Yan, Zi

From 9d2f9fc7a3e7d21af820c91aa239b6a2511d7840 Mon Sep 17 00:00:00 2001
From: Zi Yan <ziy@xxxxxxxxxx>
Date: Thu, 8 Aug 2024 11:17:03 -0400
Subject: [PATCH] mm/migrate: move common code to numa_migrate_check (was
 numa_migrate_prep)

do_numa_page() and do_huge_pmd_numa_page() share a lot of common code. To
reduce redundancy, move common code to numa_migrate_prep() and rename
the function to numa_migrate_check() to reflect its functionality.

Now do_huge_pmd_numa_page() also checks shared folios to set the
TNF_SHARED flag.

Suggested-by: David Hildenbrand <david@xxxxxxxxxx>
Signed-off-by: Zi Yan <ziy@xxxxxxxxxx>
---
 mm/huge_memory.c | 29 +++++++++-------------
 mm/internal.h    |  5 ++--
 mm/memory.c      | 63 +++++++++++++++++++++++++-----------------------
 3 files changed, 47 insertions(+), 50 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4e4364a17e6d..96a52e71d167 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1669,22 +1669,23 @@ static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	pmd_t oldpmd = vmf->orig_pmd;
-	pmd_t pmd;
 	struct folio *folio;
 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 	int nid = NUMA_NO_NODE;
-	int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
+	int target_nid, last_cpupid;
+	pmd_t pmd, old_pmd;
 	bool writable = false;
 	int flags = 0;
 
 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
-	if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
+	old_pmd = pmdp_get(vmf->pmd);
+
+	if (unlikely(!pmd_same(old_pmd, vmf->orig_pmd))) {
 		spin_unlock(vmf->ptl);
 		return 0;
 	}
 
-	pmd = pmd_modify(oldpmd, vma->vm_page_prot);
+	pmd = pmd_modify(old_pmd, vma->vm_page_prot);
 
 	/*
 	 * Detect now whether the PMD could be writable; this information
@@ -1699,18 +1700,10 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 	if (!folio)
 		goto out_map;
 
-	/* See similar comment in do_numa_page for explanation */
-	if (!writable)
-		flags |= TNF_NO_GROUP;
-
 	nid = folio_nid(folio);
-	/*
-	 * For memory tiering mode, cpupid of slow memory page is used
-	 * to record page access time.  So use default value.
-	 */
-	if (!folio_use_access_time(folio))
-		last_cpupid = folio_last_cpupid(folio);
-	target_nid = numa_migrate_prep(folio, vmf, haddr, nid, &flags);
+
+	target_nid = numa_migrate_check(folio, vmf, haddr, &flags, writable,
+					&last_cpupid);
 	if (target_nid == NUMA_NO_NODE)
 		goto out_map;
 	if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
@@ -1728,7 +1721,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 	} else {
 		flags |= TNF_MIGRATE_FAIL;
 		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
-		if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
+		if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) {
 			spin_unlock(vmf->ptl);
 			return 0;
 		}
@@ -1736,7 +1729,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 
 out_map:
 	/* Restore the PMD */
-	pmd = pmd_modify(oldpmd, vma->vm_page_prot);
+	pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot);
 	pmd = pmd_mkyoung(pmd);
 	if (writable)
 		pmd = pmd_mkwrite(pmd, vma);
diff --git a/mm/internal.h b/mm/internal.h
index 52f7fc4e8ac3..fb16e18c9761 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1191,8 +1191,9 @@ void vunmap_range_noflush(unsigned long start, unsigned long end);
 
 void __vunmap_range_noflush(unsigned long start, unsigned long end);
 
-int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
-		      unsigned long addr, int page_nid, int *flags);
+int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
+		      unsigned long addr, int *flags, bool writable,
+		      int *last_cpupid);
 
 void free_zone_device_folio(struct folio *folio);
 int migrate_device_coherent_page(struct page *page);
diff --git a/mm/memory.c b/mm/memory.c
index d9b1dff9dc57..3441f60d54ef 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5368,16 +5368,43 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
 	return ret;
 }
 
-int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
-		      unsigned long addr, int page_nid, int *flags)
+int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
+		      unsigned long addr, int *flags,
+		      bool writable, int *last_cpupid)
 {
 	struct vm_area_struct *vma = vmf->vma;
 
+	/*
+	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
+	 * much anyway since they can be in shared cache state. This misses
+	 * the case where a mapping is writable but the process never writes
+	 * to it but pte_write gets cleared during protection updates and
+	 * pte_dirty has unpredictable behaviour between PTE scan updates,
+	 * background writeback, dirty balancing and application behaviour.
+	 */
+	if (!writable)
+		*flags |= TNF_NO_GROUP;
+
+	/*
+	 * Flag if the folio is shared between multiple address spaces. This
+	 * is later used when determining whether to group tasks together
+	 */
+	if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED))
+		*flags |= TNF_SHARED;
+	/*
+	 * For memory tiering mode, cpupid of slow memory page is used
+	 * to record page access time.  So use default value.
+	 */
+	if (folio_use_access_time(folio))
+		*last_cpupid = (-1 & LAST_CPUPID_MASK);
+	else
+		*last_cpupid = folio_last_cpupid(folio);
+
 	/* Record the current PID accessing VMA */
 	vma_set_access_pid_bit(vma);
 
 	count_vm_numa_event(NUMA_HINT_FAULTS);
-	if (page_nid == numa_node_id()) {
+	if (folio_nid(folio) == numa_node_id()) {
 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
 		*flags |= TNF_FAULT_LOCAL;
 	}
@@ -5479,35 +5506,11 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 	if (!folio || folio_is_zone_device(folio))
 		goto out_map;
 
-	/*
-	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
-	 * much anyway since they can be in shared cache state. This misses
-	 * the case where a mapping is writable but the process never writes
-	 * to it but pte_write gets cleared during protection updates and
-	 * pte_dirty has unpredictable behaviour between PTE scan updates,
-	 * background writeback, dirty balancing and application behaviour.
-	 */
-	if (!writable)
-		flags |= TNF_NO_GROUP;
-
-	/*
-	 * Flag if the folio is shared between multiple address spaces. This
-	 * is later used when determining whether to group tasks together
-	 */
-	if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED))
-		flags |= TNF_SHARED;
-
 	nid = folio_nid(folio);
 	nr_pages = folio_nr_pages(folio);
-	/*
-	 * For memory tiering mode, cpupid of slow memory page is used
-	 * to record page access time.  So use default value.
-	 */
-	if (folio_use_access_time(folio))
-		last_cpupid = (-1 & LAST_CPUPID_MASK);
-	else
-		last_cpupid = folio_last_cpupid(folio);
-	target_nid = numa_migrate_prep(folio, vmf, vmf->address, nid, &flags);
+
+	target_nid = numa_migrate_check(folio, vmf, vmf->address, &flags,
+					writable, &last_cpupid);
 	if (target_nid == NUMA_NO_NODE)
 		goto out_map;
 	if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
-- 
2.43.0

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5610289865a7..96a52e71d167 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1669,22 +1669,23 @@ static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	pmd_t oldpmd = vmf->orig_pmd;
-	pmd_t pmd;
 	struct folio *folio;
 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 	int nid = NUMA_NO_NODE;
-	int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
+	int target_nid, last_cpupid;
+	pmd_t pmd, old_pmd;
 	bool writable = false;
 	int flags = 0;
 
 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
-	if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
+	old_pmd = pmdp_get(vmf->pmd);
+
+	if (unlikely(!pmd_same(old_pmd, vmf->orig_pmd))) {
 		spin_unlock(vmf->ptl);
 		return 0;
 	}
 
-	pmd = pmd_modify(oldpmd, vma->vm_page_prot);
+	pmd = pmd_modify(old_pmd, vma->vm_page_prot);
 
 	/*
 	 * Detect now whether the PMD could be writable; this information
@@ -1720,7 +1721,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 	} else {
 		flags |= TNF_MIGRATE_FAIL;
 		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
-		if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
+		if (unlikely(!pmd_same(pmdp_get(vmf->pmd), vmf->orig_pmd))) {
 			spin_unlock(vmf->ptl);
 			return 0;
 		}
@@ -1728,7 +1729,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 
 out_map:
 	/* Restore the PMD */
-	pmd = pmd_modify(oldpmd, vma->vm_page_prot);
+	pmd = pmd_modify(pmdp_get(vmf->pmd), vma->vm_page_prot);
 	pmd = pmd_mkyoung(pmd);
 	if (writable)
 		pmd = pmd_mkwrite(pmd, vma);
diff --git a/mm/memory.c b/mm/memory.c
index f186a8d8c992..3441f60d54ef 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5469,13 +5469,13 @@ static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_stru
 static vm_fault_t do_numa_page(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	pte_t old_pte = vmf->orig_pte;
-	pte_t pte;
 	struct folio *folio = NULL;
 	int nid = NUMA_NO_NODE;
 	bool writable = false, ignore_writable = false;
 	bool pte_write_upgrade = vma_wants_manual_pte_write_upgrade(vma);
-	int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
+	int last_cpupid;
+	int target_nid;
+	pte_t pte, old_pte;
 	int flags = 0, nr_pages;
 
 	/*
@@ -5483,7 +5483,10 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 	 * table lock, that its contents have not changed during fault handling.
 	 */
 	spin_lock(vmf->ptl);
-	if (unlikely(!pte_same(old_pte, ptep_get(vmf->pte)))) {
+	/* Read the live PTE from the page tables: */
+	old_pte = ptep_get(vmf->pte);
+
+	if (unlikely(!pte_same(old_pte, vmf->orig_pte))) {
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 		return 0;
 	}
@@ -5530,7 +5533,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 					       vmf->address, &vmf->ptl);
 		if (unlikely(!vmf->pte))
 			return 0;
-		if (unlikely(!pte_same(old_pte, ptep_get(vmf->pte)))) {
+		if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
 			pte_unmap_unlock(vmf->pte, vmf->ptl);
 			return 0;
 		}
