mremap() modifies the VMA layout and thus must be protected against
the speculative page fault handler.

XXX: Does the change to vma->vm_flags to set VM_ACCOUNT require this
protection?

Signed-off-by: Laurent Dufour <ldufour@xxxxxxxxxxxxxxxxxx>
---
 mm/mremap.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/mm/mremap.c b/mm/mremap.c
index 30d7d2482eea..40c3c869dffc 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -288,6 +288,10 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	if (!new_vma)
 		return -ENOMEM;
 
+	write_seqcount_begin(&vma->vm_sequence);
+	write_seqcount_begin_nested(&new_vma->vm_sequence,
+				    SINGLE_DEPTH_NESTING);
+
 	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
 				     need_rmap_locks);
 	if (moved_len < old_len) {
@@ -304,6 +308,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 		 */
 		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
 				 true);
+		write_seqcount_end(&vma->vm_sequence);
 		vma = new_vma;
 		old_len = new_len;
 		old_addr = new_addr;
@@ -311,7 +316,9 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	} else {
 		arch_remap(mm, old_addr, old_addr + old_len,
 			   new_addr, new_addr + new_len);
+		write_seqcount_end(&vma->vm_sequence);
 	}
+	write_seqcount_end(&new_vma->vm_sequence);
 
 	/* Conceal VM_ACCOUNT so old reservation is not undone */
 	if (vm_flags & VM_ACCOUNT) {
-- 
2.7.4
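
For reference, here is a minimal sketch of the read side this write-side
protection pairs with. It assumes the vm_sequence seqcount introduced
earlier in this series; the helper name and the snapshot approach are
purely illustrative, not the series' actual speculative fault entry point:

/*
 * Illustrative only: how a speculative fault path can use
 * vma->vm_sequence to detect a concurrent move_vma().
 */
static int spf_snapshot_vma(struct vm_area_struct *vma,
			    struct vm_area_struct *snap)
{
	unsigned int seq;

	seq = read_seqcount_begin(&vma->vm_sequence);
	*snap = *vma;		/* copy the fields the fault path will use */
	if (read_seqcount_retry(&vma->vm_sequence, seq))
		/* move_vma() ran concurrently: fall back to the
		 * classic path under mmap_sem */
		return -EAGAIN;
	return 0;
}

The nested annotation (write_seqcount_begin_nested() with
SINGLE_DEPTH_NESTING) is needed because two vm_sequence seqcounts of the
same lockdep class are write-held at the same time, which would otherwise
trigger a false-positive lockdep report.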