Rename hpage_collapse_scan_pmd() to hpage_collapse_scan_ptes(), prior to generalizing the collapse function. Signed-off-by: Dev Jain <dev.jain@xxxxxxx> --- mm/khugepaged.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 99dc995aac11..95643e6e5f31 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -605,7 +605,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, folio = page_folio(page); VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio); - /* See hpage_collapse_scan_pmd(). */ + /* See hpage_collapse_scan_ptes(). */ if (folio_likely_mapped_shared(folio)) { ++shared; if (cc->is_khugepaged && @@ -991,7 +991,7 @@ static int check_pmd_still_valid(struct mm_struct *mm, /* * Bring missing pages in from swap, to complete THP collapse. - * Only done if hpage_collapse_scan_pmd believes it is worthwhile. + * Only done if hpage_collapse_scan_ptes believes it is worthwhile. * * Called and returns without pte mapped or spinlocks held. * Returns result: if not SCAN_SUCCEED, mmap_lock has been released. */ @@ -1263,7 +1263,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, return result; } -static int hpage_collapse_scan_pmd(struct mm_struct *mm, +static int hpage_collapse_scan_ptes(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, bool *mmap_locked, struct collapse_control *cc) @@ -2457,7 +2457,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result, mmap_read_unlock(mm); } } else { - *result = hpage_collapse_scan_pmd(mm, vma, + *result = hpage_collapse_scan_ptes(mm, vma, khugepaged_scan.address, &mmap_locked, cc); } @@ -2792,7 +2792,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev, cc); fput(file); } else { - result = hpage_collapse_scan_pmd(mm, vma, addr, + result = hpage_collapse_scan_ptes(mm, vma, addr, &mmap_locked, cc); } if (!mmap_locked) -- 2.30.2