Re: [PATCH 29/31] mm/memory: handle_pte_fault() use pte_offset_map_nolock()

On 2023/5/22 13:26, Hugh Dickins wrote:
handle_pte_fault() use pte_offset_map_nolock() to get the vmf.ptl which
corresponds to vmf.pte, instead of pte_lockptr() being used later, when
there's a chance that the pmd entry might have changed, perhaps to none,
or to a huge pmd, with no split ptlock in its struct page.

Remove its pmd_devmap_trans_unstable() call: pte_offset_map_nolock()
will handle that case by failing.  Update the "morph" comment above,
looking forward to when shmem or file collapse to THP may not take
mmap_lock for write (or not at all).

do_numa_page() use the vmf->ptl from handle_pte_fault() at first, but
refresh it when refreshing vmf->pte.

do_swap_page()'s pte_unmap_same() (the thing that takes ptl to verify a
two-part PAE orig_pte) use the vmf->ptl from handle_pte_fault() too; but
do_swap_page() is also used by anon THP's __collapse_huge_page_swapin(),
so adjust that to set vmf->ptl by pte_offset_map_nolock().

Signed-off-by: Hugh Dickins <hughd@xxxxxxxxxx>
---
  mm/khugepaged.c |  6 ++++--
  mm/memory.c     | 38 +++++++++++++-------------------------
  2 files changed, 17 insertions(+), 27 deletions(-)
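
(Aside, not part of Hugh's patch: a minimal sketch of the map-then-lock
pattern the conversion relies on, using the pte_offset_map_nolock()
signature that appears in the hunks below; the surrounding variables
(mm, pmd, addr, orig_pte) and error handling are illustrative only.)

	spinlock_t *ptl;
	pte_t *pte;

	/* Map the page table and fetch the ptl that matches it. */
	pte = pte_offset_map_nolock(mm, pmd, addr, &ptl);
	if (!pte)
		return 0;	/* pmd changed (none or huge): let the fault be retried */

	spin_lock(ptl);
	if (!pte_same(*pte, orig_pte)) {
		/* Raced with another fault: drop the lock and retry. */
		pte_unmap_unlock(pte, ptl);
		return 0;
	}
	/* ... operate on the pte under its page table lock ... */
	pte_unmap_unlock(pte, ptl);
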

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 49cfa7cdfe93..c11db2e78e95 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1005,6 +1005,7 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
  	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
  	int result;
  	pte_t *pte = NULL;
+	spinlock_t *ptl;
 
  	for (address = haddr; address < end; address += PAGE_SIZE) {
  		struct vm_fault vmf = {
@@ -1016,7 +1017,7 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
  		};
 
  		if (!pte++) {
-			pte = pte_offset_map(pmd, address);
+			pte = pte_offset_map_nolock(mm, pmd, address, &ptl);
  			if (!pte) {
  				mmap_read_unlock(mm);
  				result = SCAN_PMD_NULL;
@@ -1024,11 +1025,12 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
  			}
  		}
 
-		vmf.orig_pte = *pte;
+		vmf.orig_pte = ptep_get_lockless(pte);
  		if (!is_swap_pte(vmf.orig_pte))
  			continue;
 
  		vmf.pte = pte;
+		vmf.ptl = ptl;
  		ret = do_swap_page(&vmf);
  		/* Which unmaps pte (after perhaps re-checking the entry) */
  		pte = NULL;
diff --git a/mm/memory.c b/mm/memory.c
index c7b920291a72..4ec46eecefd3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2786,10 +2786,9 @@ static inline int pte_unmap_same(struct vm_fault *vmf)
  	int same = 1;
  #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
  	if (sizeof(pte_t) > sizeof(unsigned long)) {
-		spinlock_t *ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
-		spin_lock(ptl);
+		spin_lock(vmf->ptl);
  		same = pte_same(*vmf->pte, vmf->orig_pte);
-		spin_unlock(ptl);
+		spin_unlock(vmf->ptl);
  	}
  #endif
  	pte_unmap(vmf->pte);
@@ -4696,7 +4695,6 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
  	 * validation through pte_unmap_same(). It's of NUMA type but
  	 * the pfn may be screwed if the read is non atomic.
  	 */
-	vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
  	spin_lock(vmf->ptl);
  	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
  		pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -4767,8 +4765,10 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
  		flags |= TNF_MIGRATED;
  	} else {
  		flags |= TNF_MIGRATE_FAIL;
-		vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
-		spin_lock(vmf->ptl);
+		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
+					       vmf->address, &vmf->ptl);
+		if (unlikely(!vmf->pte))
+			goto out;
  		if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
  			pte_unmap_unlock(vmf->pte, vmf->ptl);
  			goto out;
@@ -4897,27 +4897,16 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
  		vmf->pte = NULL;
  		vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID;
  	} else {
-		/*
-		 * If a huge pmd materialized under us just retry later.  Use
-		 * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead
-		 * of pmd_trans_huge() to ensure the pmd didn't become
-		 * pmd_trans_huge under us and then back to pmd_none, as a
-		 * result of MADV_DONTNEED running immediately after a huge pmd
-		 * fault in a different thread of this mm, in turn leading to a
-		 * misleading pmd_trans_huge() retval. All we have to ensure is
-		 * that it is a regular pmd that we can walk with
-		 * pte_offset_map() and we can do that through an atomic read
-		 * in C, which is what pmd_trans_unstable() provides.
-		 */
-		if (pmd_devmap_trans_unstable(vmf->pmd))
-			return 0;
  		/*
  		 * A regular pmd is established and it can't morph into a huge
-		 * pmd from under us anymore at this point because we hold the
-		 * mmap_lock read mode and khugepaged takes it in write mode.
-		 * So now it's safe to run pte_offset_map().
+		 * pmd by anon khugepaged, since that takes mmap_lock in write
+		 * mode; but shmem or file collapse to THP could still morph
+		 * it into a huge pmd: just retry later if so.
  		 */
-		vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
+		vmf->pte = pte_offset_map_nolock(vmf->vma->vm_mm, vmf->pmd,
+						 vmf->address, &vmf->ptl);
+		if (unlikely(!vmf->pte))
+			return 0;

Could this just jump to a retry label instead of returning 0 here?  Something like the change below:

diff --git a/mm/memory.c b/mm/memory.c
index 63632a5eafc1..2e712fe6f4be 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4897,7 +4897,8 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
 {
        pte_t entry;

-       if (unlikely(pmd_none(*vmf->pmd))) {
+retry:
+       if (unlikely(pmd_none(READ_ONCE(*vmf->pmd)))) {
                /*
                 * Leave __pte_alloc() until later: because vm_ops->fault may
                 * want to allocate huge page, and if we expose page table

  		vmf->orig_pte = ptep_get_lockless(vmf->pte);
  		vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID;
@@ -4936,7 +4925,6 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
  	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
  		return do_numa_page(vmf);
 
-	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
  	spin_lock(vmf->ptl);
  	entry = vmf->orig_pte;
  	if (unlikely(!pte_same(*vmf->pte, entry))) {
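
For what it's worth, a rough sketch (illustrative only, untested) of how the
failure path in handle_pte_fault() might then look with such a retry label,
on top of this patch:

	vmf->pte = pte_offset_map_nolock(vmf->vma->vm_mm, vmf->pmd,
					 vmf->address, &vmf->ptl);
	if (unlikely(!vmf->pte))
		goto retry;	/* pmd is no longer a page table: re-read it and start over */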

--
Thanks,
Qi



