mmu_notifier_invalidate_page() is now called from under the spinlock. Add
a call to mmu_notifier_invalidate_range() for users that need to be able
to sleep.

Relevant threads:

https://lkml.kernel.org/r/20170809204333.27485-1-jglisse@xxxxxxxxxx
https://lkml.kernel.org/r/20170804134928.l4klfcnqatni7vsc@xxxxxxxxxxxxxxxxxx
https://marc.info/?l=kvm&m=150327081325160&w=2

Signed-off-by: Jérôme Glisse <jglisse@xxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Bernhard Held <berny156@xxxxxx>
Cc: Adam Borowski <kilobyte@xxxxxxxxxx>
Cc: Andrea Arcangeli <aarcange@xxxxxxxxxx>
Cc: Radim Krčmář <rkrcmar@xxxxxxxxxx>
Cc: Wanpeng Li <kernellwp@xxxxxxxxx>
Cc: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Cc: Takashi Iwai <tiwai@xxxxxxx>
Cc: Nadav Amit <nadav.amit@xxxxxxxxx>
Cc: Mike Galbraith <efault@xxxxxx>
Cc: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
Cc: axie <axie@xxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---
 mm/rmap.c | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/mm/rmap.c b/mm/rmap.c
index c8993c63eb25..06792e28093c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -887,6 +887,8 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 		.address = address,
 		.flags = PVMW_SYNC,
 	};
+	unsigned long start = address, end = address;
+	bool invalidate = false;
 	int *cleaned = arg;
 
 	while (page_vma_mapped_walk(&pvmw)) {
@@ -927,10 +929,16 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 
 		if (ret) {
 			mmu_notifier_invalidate_page(vma->vm_mm, address);
+			/* range is exclusive */
+			end = address + PAGE_SIZE;
+			invalidate = true;
 			(*cleaned)++;
 		}
 	}
 
+	if (invalidate)
+		mmu_notifier_invalidate_range(vma->vm_mm, start, end);
+
 	return true;
 }
 
@@ -1323,8 +1331,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	};
 	pte_t pteval;
 	struct page *subpage;
-	bool ret = true;
+	bool ret = true, invalidate = false;
 	enum ttu_flags flags = (enum ttu_flags)arg;
+	unsigned long start = address, end = address;
 
 	/* munlock has nothing to gain from examining un-locked vmas */
 	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
@@ -1491,7 +1500,14 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		page_remove_rmap(subpage, PageHuge(page));
 		put_page(page);
 		mmu_notifier_invalidate_page(mm, address);
+		/* range is exclusive */
+		end = address + PAGE_SIZE;
+		invalidate = true;
 	}
+
+	if (invalidate)
+		mmu_notifier_invalidate_range(vma->vm_mm, start, end);
+
 	return ret;
 }
 
-- 
2.13.5
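
P.S. For readers unfamiliar with the notifier side: below is a minimal
sketch (not part of the patch, all "example_*" names are made up) of a
secondary-MMU user that hooks ->invalidate_range(), i.e. the callback
that the new mmu_notifier_invalidate_range() calls above will reach.

#include <linux/mmu_notifier.h>
#include <linux/mm.h>

/* Tear down secondary-MMU mappings covering [start, end). */
static void example_invalidate_range(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start, unsigned long end)
{
	/* device/hypervisor specific TLB and page table teardown */
}

static const struct mmu_notifier_ops example_mmu_notifier_ops = {
	.invalidate_range = example_invalidate_range,
};

static struct mmu_notifier example_mn = {
	.ops = &example_mmu_notifier_ops,
};

/* Registration against an mm, typically done once at device open time. */
static int example_register(struct mm_struct *mm)
{
	return mmu_notifier_register(&example_mn, mm);
}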