From: Peter Zijlstra <peterz@xxxxxxxxxxxxx>

Wrap the VMA modifications (vma_adjust/unmap_page_range) with sequence
counts so that we can easily test whether a VMA has changed.

The unmap_page_range() count lets us make assumptions about page
tables: when we find the seqcount hasn't changed, we can assume the
page tables are still valid.

The flip side is that we cannot distinguish between a vma_adjust() and
an unmap_page_range(); with the former we could have re-checked the VMA
bounds against the faulting address.

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
[port to 4.10 kernel]
Signed-off-by: Laurent Dufour <ldufour@xxxxxxxxxxxxxxxxxx>
---
 include/linux/mm_types.h |  1 +
 mm/memory.c              |  2 ++
 mm/mmap.c                | 13 +++++++++++++
 3 files changed, 16 insertions(+)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 808751d7b737..daa5fbba9349 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -358,6 +358,7 @@ struct vm_area_struct {
 	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
 #endif
 	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
+	seqcount_t vm_sequence;
 };
 
 struct core_thread {
diff --git a/mm/memory.c b/mm/memory.c
index 441c0e3f3a0f..0f7fbee554c4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1286,6 +1286,7 @@ void unmap_page_range(struct mmu_gather *tlb,
 	unsigned long next;
 
 	BUG_ON(addr >= end);
+	write_seqcount_begin(&vma->vm_sequence);
 	tlb_start_vma(tlb, vma);
 	pgd = pgd_offset(vma->vm_mm, addr);
 	do {
@@ -1295,6 +1296,7 @@ void unmap_page_range(struct mmu_gather *tlb,
 		next = zap_pud_range(tlb, vma, pgd, addr, next, details);
 	} while (pgd++, addr = next, addr != end);
 	tlb_end_vma(tlb, vma);
+	write_seqcount_end(&vma->vm_sequence);
 }
 
 
diff --git a/mm/mmap.c b/mm/mmap.c
index dc4291dcc99b..cb41659bc9f9 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -541,6 +541,8 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
 	else
 		mm->highest_vm_end = vma->vm_end;
 
+	seqcount_init(&vma->vm_sequence);
+
 	/*
 	 * vma->vm_prev wasn't known when we followed the rbtree to find the
 	 * correct insertion point for that vma. As a result, we could not
@@ -675,6 +677,10 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	long adjust_next = 0;
 	int remove_next = 0;
 
+	write_seqcount_begin(&vma->vm_sequence);
+	if (next)
+		write_seqcount_begin_nested(&next->vm_sequence, SINGLE_DEPTH_NESTING);
+
 	if (next && !insert) {
 		struct vm_area_struct *exporter = NULL, *importer = NULL;
 
@@ -886,6 +892,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 		mm->map_count--;
 		mpol_put(vma_policy(next));
+		write_seqcount_end(&next->vm_sequence);
 		kmem_cache_free(vm_area_cachep, next);
 		/*
 		 * In mprotect's case 6 (see comments on vma_merge),
 		 * we must remove another next too. It would clutter
@@ -899,6 +906,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 		 * "vma->vm_next" gap must be updated.
 		 */
		next = vma->vm_next;
+		if (next)
+			write_seqcount_begin_nested(&next->vm_sequence, SINGLE_DEPTH_NESTING);
 	} else {
 		/*
 		 * For the scope of the comment "next" and
@@ -945,6 +954,10 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 	if (insert && file)
 		uprobe_mmap(insert);
 
+	if (next)
+		write_seqcount_end(&next->vm_sequence);
+	write_seqcount_end(&vma->vm_sequence);
+
 	validate_mm(mm);
 
 	return 0;
-- 
2.7.4
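
[Illustration, not part of the patch: a minimal sketch of how a read
side might consume vm_sequence, assuming a speculative-fault caller
that simply falls back to the locked path on any retry.  The helper
names spf_vma_read_begin() and spf_vma_read_retry() are hypothetical;
raw_read_seqcount() and read_seqcount_retry() are the existing seqlock
primitives.

	#include <linux/seqlock.h>
	#include <linux/mm_types.h>

	/*
	 * Snapshot vma->vm_sequence before walking the page tables
	 * speculatively.  An odd count means a writer (vma_adjust or
	 * unmap_page_range) is in flight, so don't even start; we
	 * cannot spin here since the writer may sleep.
	 */
	static bool spf_vma_read_begin(struct vm_area_struct *vma,
				       unsigned int *seq)
	{
		*seq = raw_read_seqcount(&vma->vm_sequence);
		return !(*seq & 1);
	}

	/*
	 * After the speculative walk: returns true if the VMA was
	 * modified since the snapshot, in which case the page-table
	 * assumptions no longer hold and the caller must retry under
	 * mmap_sem.
	 */
	static bool spf_vma_read_retry(struct vm_area_struct *vma,
				       unsigned int seq)
	{
		return read_seqcount_retry(&vma->vm_sequence, seq);
	}

Note that because the count cannot tell a vma_adjust() from an
unmap_page_range(), any observed change must force the full retry; a
bounds re-check against the faulting address alone would not be
sufficient.]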