From: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx> Use vma iterator & find_vma() instead of vma linked list. Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx> Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx> --- mm/huge_memory.c | 4 ++-- mm/khugepaged.c | 11 ++++++++--- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index c468fee595ff..c72827d9cf04 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2221,11 +2221,11 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma, split_huge_pmd_if_needed(vma, end); /* - * If we're also updating the vma->vm_next->vm_start, + * If we're also updating the next vma vm_start, * check if we need to split it. */ if (adjust_next > 0) { - struct vm_area_struct *next = vma->vm_next; + struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end); unsigned long nstart = next->vm_start; nstart += adjust_next; split_huge_pmd_if_needed(next, nstart); diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 03fda93ade3e..208fc0e19eb1 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -2089,10 +2089,12 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, __releases(&khugepaged_mm_lock) __acquires(&khugepaged_mm_lock) { + struct vma_iterator vmi; struct mm_slot *mm_slot; struct mm_struct *mm; struct vm_area_struct *vma; int progress = 0; + unsigned long address; VM_BUG_ON(!pages); lockdep_assert_held(&khugepaged_mm_lock); @@ -2116,11 +2118,14 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, vma = NULL; if (unlikely(!mmap_read_trylock(mm))) goto breakouterloop_mmap_lock; - if (likely(!khugepaged_test_exit(mm))) - vma = find_vma(mm, khugepaged_scan.address); progress++; - for (; vma; vma = vma->vm_next) { + if (unlikely(khugepaged_test_exit(mm))) + goto breakouterloop; + + address = khugepaged_scan.address; + vma_iter_init(&vmi, mm, address); + for_each_vma(vmi, vma) { unsigned long hstart, hend; cond_resched(); -- 2.35.1