The patch titled
     Subject: mm: drop mmap_sem before calling balance_dirty_pages() in write fault
has been added to the -mm tree.  Its filename is
     mm-drop-mmap_sem-before-calling-balance_dirty_pages-in-write-fault.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/mm-drop-mmap_sem-before-calling-balance_dirty_pages-in-write-fault.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/mm-drop-mmap_sem-before-calling-balance_dirty_pages-in-write-fault.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Johannes Weiner <hannes@xxxxxxxxxxx>
Subject: mm: drop mmap_sem before calling balance_dirty_pages() in write fault

One of our services is observing hanging ps/top/etc under heavy write IO,
and the task states show this is an mmap_sem priority inversion:

A write fault is holding the mmap_sem in read-mode and waiting for
(heavily cgroup-limited) IO in balance_dirty_pages():

[<0>] balance_dirty_pages+0x724/0x905
[<0>] balance_dirty_pages_ratelimited+0x254/0x390
[<0>] fault_dirty_shared_page.isra.96+0x4a/0x90
[<0>] do_wp_page+0x33e/0x400
[<0>] __handle_mm_fault+0x6f0/0xfa0
[<0>] handle_mm_fault+0xe4/0x200
[<0>] __do_page_fault+0x22b/0x4a0
[<0>] page_fault+0x45/0x50
[<0>] 0xffffffffffffffff

Somebody tries to change the address space, contending for the mmap_sem in
write-mode:

[<0>] call_rwsem_down_write_failed_killable+0x13/0x20
[<0>] do_mprotect_pkey+0xa8/0x330
[<0>] SyS_mprotect+0xf/0x20
[<0>] do_syscall_64+0x5b/0x100
[<0>] entry_SYSCALL_64_after_hwframe+0x3d/0xa2
[<0>] 0xffffffffffffffff

The waiting writer locks out all subsequent readers to avoid lock
starvation, and several threads can be seen hanging like this:

[<0>] call_rwsem_down_read_failed+0x14/0x30
[<0>] proc_pid_cmdline_read+0xa0/0x480
[<0>] __vfs_read+0x23/0x140
[<0>] vfs_read+0x87/0x130
[<0>] SyS_read+0x42/0x90
[<0>] do_syscall_64+0x5b/0x100
[<0>] entry_SYSCALL_64_after_hwframe+0x3d/0xa2
[<0>] 0xffffffffffffffff

To fix this, do what we do for cache read faults already: drop the
mmap_sem before calling into anything IO bound, in this case the
balance_dirty_pages() function, and return VM_FAULT_RETRY.

Link: http://lkml.kernel.org/r/20190924194238.GA29030@xxxxxxxxxxx
Signed-off-by: Johannes Weiner <hannes@xxxxxxxxxxx>
Reviewed-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Acked-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
Cc: Josef Bacik <josef@xxxxxxxxxxxxxx>
Cc: Hillf Danton <hdanton@xxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
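[Editor's note: as an illustration of the lock dependency described above
(not taken from the original report), a hypothetical userspace reproducer
could look like the sketch below.  FILE_PATH and the 256MB mapping size
are assumptions; the file is assumed to live on a writeback-throttled
mount, and the third leg of the inversion is any external mmap_sem
reader, e.g. ps reading /proc/<pid>/cmdline.]

	/*
	 * Hypothetical reproducer sketch: thread 1 takes shared write
	 * faults that can stall in balance_dirty_pages() with mmap_sem
	 * held for read, thread 2 queues for mmap_sem in write mode,
	 * and an external "ps" on this pid then queues behind thread 2.
	 * Build with: cc -O2 -pthread repro.c
	 */
	#include <fcntl.h>
	#include <pthread.h>
	#include <stdlib.h>
	#include <sys/mman.h>
	#include <unistd.h>

	#define FILE_PATH "/mnt/throttled/datafile"	/* assumed slow mount */
	#define MAP_LEN   (256UL << 20)		/* assumed size */

	static char *map;

	static void *dirtier(void *arg)
	{
		/* Dirty every page except page 0, which the protector
		 * thread flips read-only; write faults on a MAP_SHARED
		 * mapping go through fault_dirty_shared_page(). */
		for (;;)
			for (size_t off = 4096; off < MAP_LEN; off += 4096)
				map[off]++;
		return NULL;
	}

	static void *protector(void *arg)
	{
		/* mprotect() takes mmap_sem in write mode */
		for (;;) {
			mprotect(map, 4096, PROT_READ);
			mprotect(map, 4096, PROT_READ | PROT_WRITE);
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t t1, t2;
		int fd = open(FILE_PATH, O_RDWR | O_CREAT, 0644);

		if (fd < 0 || ftruncate(fd, MAP_LEN))
			return 1;
		map = mmap(NULL, MAP_LEN, PROT_READ | PROT_WRITE,
			   MAP_SHARED, fd, 0);
		if (map == MAP_FAILED)
			return 1;
		pthread_create(&t1, NULL, dirtier, NULL);
		pthread_create(&t2, NULL, protector, NULL);
		pause();	/* now run ps/top against this pid */
		return 0;
	}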
---

 mm/filemap.c  |   21 ---------------------
 mm/internal.h |   21 +++++++++++++++++++++
 mm/memory.c   |   38 +++++++++++++++++++++++++++-----------
 3 files changed, 48 insertions(+), 32 deletions(-)

--- a/mm/filemap.c~mm-drop-mmap_sem-before-calling-balance_dirty_pages-in-write-fault
+++ a/mm/filemap.c
@@ -2328,27 +2328,6 @@ EXPORT_SYMBOL(generic_file_read_iter);
 #ifdef CONFIG_MMU
 #define MMAP_LOTSAMISS  (100)
 
-static struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
-					     struct file *fpin)
-{
-	int flags = vmf->flags;
-
-	if (fpin)
-		return fpin;
-
-	/*
-	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
-	 * anything, so we only pin the file and drop the mmap_sem if only
-	 * FAULT_FLAG_ALLOW_RETRY is set.
-	 */
-	if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
-	    FAULT_FLAG_ALLOW_RETRY) {
-		fpin = get_file(vmf->vma->vm_file);
-		up_read(&vmf->vma->vm_mm->mmap_sem);
-	}
-	return fpin;
-}
-
 /*
  * lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_sem
  * @vmf - the vm_fault for this fault.
--- a/mm/internal.h~mm-drop-mmap_sem-before-calling-balance_dirty_pages-in-write-fault
+++ a/mm/internal.h
@@ -362,6 +362,27 @@ vma_address(struct page *page, struct vm
 	return max(start, vma->vm_start);
 }
 
+static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
+						    struct file *fpin)
+{
+	int flags = vmf->flags;
+
+	if (fpin)
+		return fpin;
+
+	/*
+	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
+	 * anything, so we only pin the file and drop the mmap_sem if only
+	 * FAULT_FLAG_ALLOW_RETRY is set.
+	 */
+	if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
+	    FAULT_FLAG_ALLOW_RETRY) {
+		fpin = get_file(vmf->vma->vm_file);
+		up_read(&vmf->vma->vm_mm->mmap_sem);
+	}
+	return fpin;
+}
+
 #else /* !CONFIG_MMU */
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
--- a/mm/memory.c~mm-drop-mmap_sem-before-calling-balance_dirty_pages-in-write-fault
+++ a/mm/memory.c
@@ -2227,10 +2227,11 @@ static vm_fault_t do_page_mkwrite(struct
  *
  * The function expects the page to be locked and unlocks it.
  */
-static void fault_dirty_shared_page(struct vm_area_struct *vma,
-		struct page *page)
+static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	struct address_space *mapping;
+	struct page *page = vmf->page;
 	bool dirtied;
 	bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
 
@@ -2245,16 +2246,30 @@ static void fault_dirty_shared_page(stru
 	mapping = page_rmapping(page);
 	unlock_page(page);
 
+	if (!page_mkwrite)
+		file_update_time(vma->vm_file);
+
+	/*
+	 * Throttle page dirtying rate down to writeback speed.
+	 *
+	 * mapping may be NULL here because some device drivers do not
+	 * set page.mapping but still dirty their pages
+	 *
+	 * Drop the mmap_sem before waiting on IO, if we can. The file
+	 * is pinning the mapping, as per above.
+	 */
 	if ((dirtied || page_mkwrite) && mapping) {
-		/*
-		 * Some device drivers do not set page.mapping
-		 * but still dirty their pages
-		 */
+		struct file *fpin;
+
+		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
 		balance_dirty_pages_ratelimited(mapping);
+		if (fpin) {
+			fput(fpin);
+			return VM_FAULT_RETRY;
+		}
 	}
 
-	if (!page_mkwrite)
-		file_update_time(vma->vm_file);
+	return 0;
 }
 
 /*
@@ -2497,6 +2512,7 @@ static vm_fault_t wp_page_shared(struct
 	__releases(vmf->ptl)
 {
 	struct vm_area_struct *vma = vmf->vma;
+	vm_fault_t ret = VM_FAULT_WRITE;
 
 	get_page(vmf->page);
 
@@ -2520,10 +2536,10 @@ static vm_fault_t wp_page_shared(struct
 		wp_page_reuse(vmf);
 		lock_page(vmf->page);
 	}
-	fault_dirty_shared_page(vma, vmf->page);
+	ret |= fault_dirty_shared_page(vmf);
 	put_page(vmf->page);
 
-	return VM_FAULT_WRITE;
+	return ret;
 }
 
 /*
@@ -3567,7 +3583,7 @@ static vm_fault_t do_shared_fault(struct
 		return ret;
 	}
 
-	fault_dirty_shared_page(vma, vmf->page);
+	ret |= fault_dirty_shared_page(vmf);
 
 	return ret;
 }
_

Patches currently in -mm which might be from hannes@xxxxxxxxxxx are

mm-drop-mmap_sem-before-calling-balance_dirty_pages-in-write-fault.patch
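[Editor's note: for context on the VM_FAULT_RETRY return that
fault_dirty_shared_page() can now propagate: the arch fault handler is
what observes it and restarts the fault after reacquiring the mmap_sem.
The following is a simplified sketch of the x86 __do_page_fault() retry
logic of this era, not the verbatim code:]

	retry:
		down_read(&mm->mmap_sem);
		...
		fault = handle_mm_fault(vma, address, flags);
		if ((fault & VM_FAULT_RETRY) &&
		    (flags & FAULT_FLAG_ALLOW_RETRY)) {
			/* handle_mm_fault() already dropped mmap_sem */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
		up_read(&mm->mmap_sem);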