The patch titled
     Subject: damon: use pmdp_get instead of directly dereferencing pmd
has been added to the -mm mm-unstable branch.  Its filename is
     damon-use-pmdp_get-instead-of-drectly-dereferencing-pmd.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/damon-use-pmdp_get-instead-of-drectly-dereferencing-pmd.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything branch
at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm and is
updated there every 2-3 working days

------------------------------------------------------
From: Levi Yun <ppbuk5246@xxxxxxxxx>
Subject: damon: use pmdp_get instead of directly dereferencing pmd
Date: Fri, 28 Jul 2023 06:21:57 +0900

As with ptep_get(), use the pmdp_get() wrapper when accessing the pmd
value instead of directly dereferencing the pmd pointer.

Link: https://lkml.kernel.org/r/20230727212157.2985025-1-ppbuk5246@xxxxxxxxx
Signed-off-by: Levi Yun <ppbuk5246@xxxxxxxxx>
Reviewed-by: SeongJae Park <sj@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---
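
[Context, not part of the patch itself: the value of pmdp_get() is that
its generic fallback in include/linux/pgtable.h reads the entry through
READ_ONCE(), so a concurrently-updated pmd is sampled exactly once
rather than re-dereferenced at each check, and architectures with
non-trivial page table formats can override it.  A minimal sketch of
that generic fallback, modulo the exact surrounding kernel context:

	#ifndef pmdp_get
	/* Generic fallback: a single, tear-free read of the pmd entry. */
	static inline pmd_t pmdp_get(pmd_t *pmdp)
	{
		return READ_ONCE(*pmdp);
	}
	#endif

Reading once through pmdp_get() into a local pmde, as the vaddr.c hunks
below do, also ensures the subsequent pmd_present() and pmd_trans_huge()
checks test the same snapshot rather than two different loads.]
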
 mm/damon/ops-common.c |    2 +-
 mm/damon/paddr.c      |    2 +-
 mm/damon/vaddr.c      |   23 +++++++++++++++--------
 3 files changed, 17 insertions(+), 10 deletions(-)

--- a/mm/damon/ops-common.c~damon-use-pmdp_get-instead-of-drectly-dereferencing-pmd
+++ a/mm/damon/ops-common.c
@@ -54,7 +54,7 @@ void damon_ptep_mkold(pte_t *pte, struct
 void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	struct folio *folio = damon_get_folio(pmd_pfn(*pmd));
+	struct folio *folio = damon_get_folio(pmd_pfn(pmdp_get(pmd)));
 
 	if (!folio)
 		return;
--- a/mm/damon/paddr.c~damon-use-pmdp_get-instead-of-drectly-dereferencing-pmd
+++ a/mm/damon/paddr.c
@@ -94,7 +94,7 @@ static bool __damon_pa_young(struct foli
 			mmu_notifier_test_young(vma->vm_mm, addr);
 	} else {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-		*accessed = pmd_young(*pvmw.pmd) ||
+		*accessed = pmd_young(pmdp_get(pvmw.pmd)) ||
 			!folio_test_idle(folio) ||
 			mmu_notifier_test_young(vma->vm_mm, addr);
 #else
--- a/mm/damon/vaddr.c~damon-use-pmdp_get-instead-of-drectly-dereferencing-pmd
+++ a/mm/damon/vaddr.c
@@ -301,16 +301,19 @@ static int damon_mkold_pmd_entry(pmd_t *
 		unsigned long next, struct mm_walk *walk)
 {
 	pte_t *pte;
+	pmd_t pmde;
 	spinlock_t *ptl;
 
-	if (pmd_trans_huge(*pmd)) {
+	if (pmd_trans_huge(pmdp_get(pmd))) {
 		ptl = pmd_lock(walk->mm, pmd);
-		if (!pmd_present(*pmd)) {
+		pmde = pmdp_get(pmd);
+
+		if (!pmd_present(pmde)) {
 			spin_unlock(ptl);
 			return 0;
 		}
 
-		if (pmd_trans_huge(*pmd)) {
+		if (pmd_trans_huge(pmde)) {
 			damon_pmdp_mkold(pmd, walk->vma, addr);
 			spin_unlock(ptl);
 			return 0;
@@ -439,21 +442,25 @@ static int damon_young_pmd_entry(pmd_t *
 	struct damon_young_walk_private *priv = walk->private;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	if (pmd_trans_huge(*pmd)) {
+	if (pmd_trans_huge(pmdp_get(pmd))) {
+		pmd_t pmde;
+
 		ptl = pmd_lock(walk->mm, pmd);
-		if (!pmd_present(*pmd)) {
+		pmde = pmdp_get(pmd);
+
+		if (!pmd_present(pmde)) {
 			spin_unlock(ptl);
 			return 0;
 		}
 
-		if (!pmd_trans_huge(*pmd)) {
+		if (!pmd_trans_huge(pmde)) {
 			spin_unlock(ptl);
 			goto regular_page;
 		}
 
-		folio = damon_get_folio(pmd_pfn(*pmd));
+		folio = damon_get_folio(pmd_pfn(pmde));
 		if (!folio)
 			goto huge_out;
-		if (pmd_young(*pmd) || !folio_test_idle(folio) ||
+		if (pmd_young(pmde) || !folio_test_idle(folio) ||
 			mmu_notifier_test_young(walk->mm, addr))
 			priv->young = true;
_

Patches currently in -mm which might be from ppbuk5246@xxxxxxxxx are

damon-use-pmdp_get-instead-of-drectly-dereferencing-pmd.patch