Add a range field to struct vm_fault. This carries the range that was
locked for the given fault. Fault handlers that release the mmap_sem
should pass this range.

Signed-off-by: Michel Lespinasse <walken@xxxxxxxxxx>
---
 include/linux/mm.h | 1 +
 mm/hugetlb.c       | 1 +
 mm/khugepaged.c    | 1 +
 mm/memory.c        | 1 +
 4 files changed, 4 insertions(+)

diff --git include/linux/mm.h include/linux/mm.h
index 052f423d7f67..a1c9a0aa898b 100644
--- include/linux/mm.h
+++ include/linux/mm.h
@@ -451,6 +451,7 @@ struct vm_fault {
 					 * page table to avoid allocation from
 					 * atomic context.
 					 */
+	struct mm_lock_range *range;	/* MM read lock range. */
 };

 /* page entry size for vm->huge_fault() */
diff --git mm/hugetlb.c mm/hugetlb.c
index dd8737a94bec..662f34b6c869 100644
--- mm/hugetlb.c
+++ mm/hugetlb.c
@@ -3831,6 +3831,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 			.vma = vma,
 			.address = haddr,
 			.flags = flags,
+			.range = mm_coarse_lock_range(),
 			/*
 			 * Hard to debug if it ends up being
 			 * used by a callee that assumes
diff --git mm/khugepaged.c mm/khugepaged.c
index 7ee8ae64824b..a7807bb0d631 100644
--- mm/khugepaged.c
+++ mm/khugepaged.c
@@ -900,6 +900,7 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 		.flags = FAULT_FLAG_ALLOW_RETRY,
 		.pmd = pmd,
 		.pgoff = linear_page_index(vma, address),
+		.range = mm_coarse_lock_range(),
 	};

 	/* we only decide to swapin, if there is enough young ptes */
diff --git mm/memory.c mm/memory.c
index 45b42fa02a2e..6cb3359f0857 100644
--- mm/memory.c
+++ mm/memory.c
@@ -4047,6 +4047,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 		.flags = flags,
 		.pgoff = linear_page_index(vma, address),
 		.gfp_mask = __get_fault_gfp_mask(vma),
+		.range = mm_coarse_lock_range(),
 	};
 	unsigned int dirty = flags & FAULT_FLAG_WRITE;
 	struct mm_struct *mm = vma->vm_mm;
--
2.25.0.341.g760bfbb309-goog
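
As a usage illustration (not part of this patch), the sketch below shows how
a fault handler that drops the lock might consume vmf->range under this
series. The helper mm_read_range_unlock() is an assumed name following the
series' mm_coarse_lock_range() convention and is not defined by this patch.

#include <linux/mm.h>

/*
 * Hypothetical sketch only, not from this patch: a fault handler that
 * must sleep for I/O drops the read lock and asks the caller to retry.
 * With range locking, it releases exactly the range recorded in
 * vmf->range rather than assuming the coarse whole-address-space lock.
 * mm_read_range_unlock() is an assumed helper from the wider series.
 */
static vm_fault_t example_fault_wait_for_io(struct vm_fault *vmf)
{
	struct mm_struct *mm = vmf->vma->vm_mm;

	if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
	    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
		/* Drop the read lock on the range taken for this fault. */
		mm_read_range_unlock(mm, vmf->range);
		/* ... wait for the page to become available ... */
		return VM_FAULT_RETRY;
	}
	return 0;
}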