The fault path will immediately fail (returning VM_FAULT_RETRY) in handle_mm_fault(), so this is the minimal step which allows the per-VMA lock to be taken on file-backed VMAs. There may be a small performance reduction as a little unnecessary work will be done on each page fault. See later patches for the improvement. Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx> --- mm/memory.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index fdaec7772fff..f726f85f0081 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5223,6 +5223,9 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, flags & FAULT_FLAG_REMOTE)) return VM_FAULT_SIGSEGV; + if ((flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vma)) + return VM_FAULT_RETRY; + /* * Enable the memcg OOM handling for faults triggered in user * space. Kernel faults are handled more gracefully. @@ -5275,12 +5278,8 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, if (!vma) goto inval; - /* Only anonymous vmas are supported for now */ - if (!vma_is_anonymous(vma)) - goto inval; - /* find_mergeable_anon_vma uses adjacent vmas which are not locked */ - if (!vma->anon_vma) + if (vma_is_anonymous(vma) && !vma->anon_vma) goto inval; if (!vma_start_read(vma)) -- 2.39.2