From: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx> Use vma iterator & find_vma() instead of vma linked list. Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx> Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx> --- mm/huge_memory.c | 4 ++-- mm/khugepaged.c | 11 ++++++++--- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 83c47a989260..6c5c23ef658a 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2339,11 +2339,11 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma, split_huge_pmd_if_needed(vma, end); /* - * If we're also updating the vma->vm_next->vm_start, + * If we're also updating the next vma vm_start, * check if we need to split it. */ if (adjust_next > 0) { - struct vm_area_struct *next = vma->vm_next; + struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end); unsigned long nstart = next->vm_start; nstart += adjust_next; split_huge_pmd_if_needed(next, nstart); diff --git a/mm/khugepaged.c b/mm/khugepaged.c index d3313b7a8fe5..d8e388106322 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -2053,10 +2053,12 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result, __releases(&khugepaged_mm_lock) __acquires(&khugepaged_mm_lock) { + struct vma_iterator vmi; struct mm_slot *mm_slot; struct mm_struct *mm; struct vm_area_struct *vma; int progress = 0; + unsigned long address; VM_BUG_ON(!pages); lockdep_assert_held(&khugepaged_mm_lock); @@ -2081,11 +2083,14 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result, vma = NULL; if (unlikely(!mmap_read_trylock(mm))) goto breakouterloop_mmap_lock; - if (likely(!hpage_collapse_test_exit(mm))) - vma = find_vma(mm, khugepaged_scan.address); progress++; - for (; vma; vma = vma->vm_next) { + if (unlikely(hpage_collapse_test_exit(mm))) + goto breakouterloop; + + address = khugepaged_scan.address; + vma_iter_init(&vmi, mm, address); + for_each_vma(vmi, vma) { unsigned long hstart, hend; cond_resched(); -- 2.35.1