Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx> --- mm/mempolicy.c | 44 +++++++++++++++++++++++++++----------------- 1 file changed, 27 insertions(+), 17 deletions(-) diff --git a/mm/mempolicy.c b/mm/mempolicy.c index d79fa299b70c..efffa5e5aabf 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -404,10 +404,13 @@ void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new) void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) { struct vm_area_struct *vma; + MA_STATE(mas, &mm->mm_mt, 0, 0); mmap_write_lock(mm); - for (vma = mm->mmap; vma; vma = vma->vm_next) + mas_lock(&mas); + mas_for_each(&mas, vma, ULONG_MAX) mpol_rebind_policy(vma->vm_policy, new); + mas_unlock(&mas); mmap_write_unlock(mm); } @@ -671,7 +674,7 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma, static int queue_pages_test_walk(unsigned long start, unsigned long end, struct mm_walk *walk) { - struct vm_area_struct *vma = walk->vma; + struct vm_area_struct *next, *vma = walk->vma; struct queue_pages *qp = walk->private; unsigned long endvma = vma->vm_end; unsigned long flags = qp->flags; @@ -686,9 +689,10 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end, /* hole at head side of range */ return -EFAULT; } + next = vma_next(vma->vm_mm, vma); if (!(flags & MPOL_MF_DISCONTIG_OK) && ((vma->vm_end < qp->end) && - (!vma->vm_next || vma->vm_end < vma->vm_next->vm_start))) + (!next || vma->vm_end < next->vm_start))) /* hole at middle or tail of range */ return -EFAULT; @@ -802,28 +806,28 @@ static int vma_replace_policy(struct vm_area_struct *vma, static int mbind_range(struct mm_struct *mm, unsigned long start, unsigned long end, struct mempolicy *new_pol) { - struct vm_area_struct *next; struct vm_area_struct *prev; struct vm_area_struct *vma; int err = 0; pgoff_t pgoff; unsigned long vmstart; unsigned long vmend; + MA_STATE(mas, &mm->mm_mt, start, start); - vma = find_vma(mm, start); + rcu_read_lock(); + vma = mas_find(&mas, ULONG_MAX); 
VM_BUG_ON(!vma); - prev = vma->vm_prev; + prev = mas_prev(&mas, 0); if (start > vma->vm_start) prev = vma; - for (; vma && vma->vm_start < end; prev = vma, vma = next) { - next = vma->vm_next; + mas_for_each(&mas, vma, end - 1) { vmstart = max(start, vma->vm_start); vmend = min(end, vma->vm_end); if (mpol_equal(vma_policy(vma), new_pol)) - continue; + goto next; pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT); @@ -832,7 +836,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, new_pol, vma->vm_userfaultfd_ctx); if (prev) { vma = prev; - next = vma->vm_next; + mas_set(&mas, vma->vm_end); if (mpol_equal(vma_policy(vma), new_pol)) continue; /* vma_merge() joined vma && vma->next, case 8 */ @@ -848,13 +852,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, if (err) goto out; } - replace: +replace: err = vma_replace_policy(vma, new_pol); if (err) goto out; +next: + prev = vma; } - out: +out: + rcu_read_unlock(); return err; } @@ -975,7 +982,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask, * want to return MPOL_DEFAULT in this case. */ mmap_read_lock(mm); - vma = find_vma_intersection(mm, addr, addr+1); + vma = vma_lookup(mm, addr); if (!vma) { mmap_read_unlock(mm); return -EFAULT; @@ -1082,6 +1089,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest, int flags) { nodemask_t nmask; + struct vm_area_struct *vma; LIST_HEAD(pagelist); int err = 0; struct migration_target_control mtc = { @@ -1097,8 +1105,9 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest, * need migration. Between passing in the full user address * space range and MPOL_MF_DISCONTIG_OK, this call can not fail. 
*/ + vma = find_vma(mm, 0); VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))); - queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask, + queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask, flags | MPOL_MF_DISCONTIG_OK, &pagelist); if (!list_empty(&pagelist)) { @@ -1227,14 +1236,15 @@ static struct page *new_page(struct page *page, unsigned long start) { struct vm_area_struct *vma; unsigned long address; + MA_STATE(mas, &current->mm->mm_mt, start, start); - vma = find_vma(current->mm, start); - while (vma) { + rcu_read_lock(); + mas_for_each(&mas, vma, ULONG_MAX) { address = page_address_in_vma(page, vma); if (address != -EFAULT) break; - vma = vma->vm_next; } + rcu_read_unlock(); if (PageHuge(page)) { return alloc_huge_page_vma(page_hstate(compound_head(page)), -- 2.30.2