The patch titled
     mlock: backout locked_vm adjustment during mmap()
has been added to the -mm tree.  Its filename is
     mmap-handle-mlocked-pages-during-map-remap-unmap-mlock-backout-locked_vm-adjustment-during-mmap.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/SubmitChecklist when testing your code ***

See http://www.zip.com.au/~akpm/linux/patches/stuff/added-to-mm.txt to find
out what to do about this

The current -mm tree may be found at http://userweb.kernel.org/~akpm/mmotm/

------------------------------------------------------
Subject: mlock: backout locked_vm adjustment during mmap()
From: Lee Schermerhorn <lee.schermerhorn@xxxxxx>

Backout mmap() path locked_vm accounting adjustment from the "handle
mlocked pages during map/remap/unmap" patch.  Will resubmit as separate
patch with its own description.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@xxxxxxxxxxxxxx>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@xxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/mlock.c |   19 ++++++-------------
 mm/mmap.c  |   19 ++++++++-----------
 2 files changed, 14 insertions(+), 24 deletions(-)

diff -puN mm/mlock.c~mmap-handle-mlocked-pages-during-map-remap-unmap-mlock-backout-locked_vm-adjustment-during-mmap mm/mlock.c
--- a/mm/mlock.c~mmap-handle-mlocked-pages-during-map-remap-unmap-mlock-backout-locked_vm-adjustment-during-mmap
+++ a/mm/mlock.c
@@ -246,7 +246,7 @@ int mlock_vma_pages_range(struct vm_area
 		unsigned long start, unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	int nr_pages = (end - start) / PAGE_SIZE;
+	int error = 0;

 	BUG_ON(!(vma->vm_flags & VM_LOCKED));
 	/*
@@ -259,8 +259,7 @@ int mlock_vma_pages_range(struct vm_area
 			is_vm_hugetlb_page(vma) ||
 			vma == get_gate_vma(current))) {
 		downgrade_write(&mm->mmap_sem);
-		nr_pages = __mlock_vma_pages_range(vma, start, end, 1);
-
+		error = __mlock_vma_pages_range(vma, start, end, 1);
 		up_read(&mm->mmap_sem);
 		/* vma can change or disappear */
 		down_write(&mm->mmap_sem);
@@ -268,22 +267,20 @@ int mlock_vma_pages_range(struct vm_area
 		/* non-NULL vma must contain @start, but need to check @end */
 		if (!vma || end > vma->vm_end)
 			return -EAGAIN;
-		return nr_pages;
+		return error;
 	}

 	/*
 	 * User mapped kernel pages or huge pages:
 	 * make these pages present to populate the ptes, but
-	 * fall thru' to reset VM_LOCKED--no need to unlock, and
-	 * return nr_pages so these don't get counted against task's
-	 * locked limit.  huge pages are already counted against
-	 * locked vm limit.
+	 * fall thru' to reset VM_LOCKED so we don't try to munlock
+	 * this vma during munmap()/munlock().
 	 */
 	make_pages_present(start, end);

 no_mlock:
 	vma->vm_flags &= ~VM_LOCKED;	/* and don't come back! */
-	return nr_pages;		/* pages NOT mlocked */
+	return error;
 }

@@ -372,10 +369,6 @@ success:
 	downgrade_write(&mm->mmap_sem);

 	ret = __mlock_vma_pages_range(vma, start, end, 1);
-	if (ret > 0) {
-		mm->locked_vm -= ret;
-		ret = 0;
-	}
 	/*
 	 * Need to reacquire mmap sem in write mode, as our callers
 	 * expect this.  We have no support for atomically upgrading
diff -puN mm/mmap.c~mmap-handle-mlocked-pages-during-map-remap-unmap-mlock-backout-locked_vm-adjustment-during-mmap mm/mmap.c
--- a/mm/mmap.c~mmap-handle-mlocked-pages-during-map-remap-unmap-mlock-backout-locked_vm-adjustment-during-mmap
+++ a/mm/mmap.c
@@ -1224,10 +1224,10 @@ out:
 		/*
 		 * makes pages present; downgrades, drops, reacquires mmap_sem
 		 */
-		int nr_pages = mlock_vma_pages_range(vma, addr, addr + len);
-		if (nr_pages < 0)
-			return nr_pages;	/* vma gone! */
-		mm->locked_vm += (len >> PAGE_SHIFT) - nr_pages;
+		int error = mlock_vma_pages_range(vma, addr, addr + len);
+		if (error < 0)
+			return error;	/* vma gone! */
+		mm->locked_vm += (len >> PAGE_SHIFT);
 	} else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
 		make_pages_present(addr, addr + len);
 	return addr;
@@ -1702,8 +1702,7 @@ find_extend_vma(struct mm_struct *mm, un
 	if (!prev || expand_stack(prev, addr))
 		return NULL;
 	if (prev->vm_flags & VM_LOCKED) {
-		int nr_pages = mlock_vma_pages_range(prev, addr, prev->vm_end);
-		if (nr_pages < 0)
+		if (mlock_vma_pages_range(prev, addr, prev->vm_end) < 0)
 			return NULL;	/* vma gone! */
 	}
 	return prev;
@@ -1732,8 +1731,7 @@ find_extend_vma(struct mm_struct * mm, u
 	if (expand_stack(vma, addr))
 		return NULL;
 	if (vma->vm_flags & VM_LOCKED) {
-		int nr_pages = mlock_vma_pages_range(vma, addr, start);
-		if (nr_pages < 0)
+		if (mlock_vma_pages_range(vma, addr, start) < 0)
 			return NULL;	/* vma gone! */
 	}
 	return vma;
@@ -2068,9 +2066,8 @@ unsigned long do_brk(unsigned long addr,
 out:
 	mm->total_vm += len >> PAGE_SHIFT;
 	if (flags & VM_LOCKED) {
-		int nr_pages = mlock_vma_pages_range(vma, addr, addr + len);
-		if (nr_pages >= 0)
-			mm->locked_vm += (len >> PAGE_SHIFT) - nr_pages;
+		if (mlock_vma_pages_range(vma, addr, addr + len) >= 0)
+			mm->locked_vm += (len >> PAGE_SHIFT);
 	}
 	return addr;
 }
_
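A note for readers following the locking in the mm/mlock.c hunks:
mlock_vma_pages_range() is entered with mmap_sem held for write,
downgrades it to read around the page faults, and then must drop and
retake it, because rwsems offer no atomic read-to-write upgrade.  A
condensed sketch of that sequence (illustrative only, not part of the
patch; it reuses the identifiers from the diff above, and assumes the
caller entered with down_write(&mm->mmap_sem) held):

	downgrade_write(&mm->mmap_sem);	/* write -> read, atomically */
	error = __mlock_vma_pages_range(vma, start, end, 1);
					/* may fault pages in and sleep */
	up_read(&mm->mmap_sem);		/* no atomic upgrade exists... */
	down_write(&mm->mmap_sem);	/* ...so drop and retake for write */
	vma = find_vma(mm, start);	/* vma may have been merged, split */
	if (!vma || end > vma->vm_end)	/* or unmapped while unlocked */
		return -EAGAIN;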
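The user-visible effect of the mm/mmap.c hunks is that a successful
mmap() of a VM_LOCKED region now charges the full mapping length to
mm->locked_vm, rather than the length minus whatever
__mlock_vma_pages_range() reported.  locked_vm is what
/proc/<pid>/status reports as VmLck, so the accounting can be observed
from userspace.  A small test program (hypothetical, not part of the
patch; needs a sufficient RLIMIT_MEMLOCK or CAP_IPC_LOCK to succeed):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	const size_t len = 4 * 1024 * 1024;	/* 4 MB == 4096 kB */
	char line[256];
	FILE *f;

	/* MAP_LOCKED sets VM_LOCKED, so the mmap() path above runs. */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_LOCKED, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(MAP_LOCKED)");	/* likely RLIMIT_MEMLOCK */
		return 1;
	}

	/* VmLck reflects mm->locked_vm; expect ~4096 kB here. */
	f = fopen("/proc/self/status", "r");
	if (f) {
		while (fgets(line, sizeof(line), f))
			if (strncmp(line, "VmLck:", 6) == 0)
				fputs(line, stdout);
		fclose(f);
	}

	munmap(p, len);
	return 0;
}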
Patches currently in -mm which might be from lee.schermerhorn@xxxxxx are

vmscan-use-an-indexed-array-for-lru-variables.patch
define-page_file_cache-function.patch
vmscan-split-lru-lists-into-anon-file-sets.patch
pageflag-helpers-for-configed-out-flags.patch
unevictable-lru-infrastructure.patch
unevictable-lru-infrastructure-nommu-fix.patch
unevictable-lru-infrastructure-remember-pages-active-state.patch
unevictable-lru-infrastructure-defer-vm-event-counting.patch
unevictable-infrastructure-lru-add-event-counting-with-statistics.patch
unevictable-lru-page-statistics.patch
ramfs-and-ram-disk-pages-are-unevictable.patch
shm_locked-pages-are-unevictable.patch
shm_locked-pages-are-unevictable-add-event-counts-to-list-scan.patch
mlock-mlocked-pages-are-unevictable.patch
mlock-mlocked-pages-are-unevictable-fix.patch
doc-unevictable-lru-and-mlocked-pages-documentation.patch
doc-unevictable-lru-and-mlocked-pages-documentation-update.patch
doc-unevictable-lru-and-mlocked-pages-documentation-update-2.patch
mlock-downgrade-mmap-sem-while-populating-mlocked-regions.patch
mmap-handle-mlocked-pages-during-map-remap-unmap.patch
mmap-handle-mlocked-pages-during-map-remap-unmap-mlock-fix-__mlock_vma_pages_range-comment-block.patch
mmap-handle-mlocked-pages-during-map-remap-unmap-mlock-backout-locked_vm-adjustment-during-mmap.patch
vmstat-mlocked-pages-statistics.patch
vmstat-mlocked-pages-statistics-mlocked-pages-add-event-counting-with-statistics.patch
swap-cull-unevictable-pages-in-fault-path.patch
vmscan-unevictable-lru-scan-sysctl.patch
mlock-count-attempts-to-free-mlocked-page-2.patch
mlock-resubmit-locked_vm-adjustment-as-separate-patch.patch
mlock-fix-return-value-for-munmap-mlock-vma-race.patch
mlock-update-locked_vm-on-munmap-of-mlocked-region.patch
mlock-revert-mainline-handling-of-mlock-error-return.patch
mlock-make-mlock-error-return-posixly-correct.patch

--
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html