Use the helpers huge_pte_lock and pmd_lock to simplify the code. No
functional change intended.

Signed-off-by: Miaohe Lin <linmiaohe@xxxxxxxxxx>
---
 mm/hugetlb.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 06293da96112..6ea9f73aea84 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6094,8 +6094,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 		page_in_pagecache = true;
 	}
 
-	ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
-	spin_lock(ptl);
+	ptl = huge_pte_lock(h, dst_mm, dst_pte);
 
 	/*
 	 * We allow to overwrite a pte marker: consider when both MISSING|WP
@@ -7176,8 +7175,7 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 		return NULL;
 
 retry:
-	ptl = pmd_lockptr(mm, pmd);
-	spin_lock(ptl);
+	ptl = pmd_lock(mm, pmd);
 	/*
 	 * make sure that the address range covered by this pmd is not
 	 * unmapped from other threads.
-- 
2.23.0
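
For reference, the two helpers already fold the lockptr lookup and the
spin_lock() call into one step, which is why this is a pure cleanup. Their
definitions read roughly as follows (a sketch paraphrased from
include/linux/hugetlb.h and include/linux/mm.h; the exact wording may differ
between kernel versions):

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	/* Look up the page-table lock covering this huge pte and take it. */
	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}

static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
{
	/* Same pattern for the pmd-level page-table lock. */
	spinlock_t *ptl = pmd_lockptr(mm, pmd);

	spin_lock(ptl);
	return ptl;
}

Callers that previously open-coded the lookup-then-lock sequence can simply
assign the helper's return value to ptl, as the two hunks above do.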