Update the fault handler to use a pseudo-vma when the original vma is
anonymous. This is in preparation for handling such faults with a
fine-grained range lock in a later change.

Signed-off-by: Michel Lespinasse <walken@xxxxxxxxxx>
---
 arch/x86/mm/fault.c | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

diff --git arch/x86/mm/fault.c arch/x86/mm/fault.c
index 700da3cc3db9..52333272e14e 100644
--- arch/x86/mm/fault.c
+++ arch/x86/mm/fault.c
@@ -1279,7 +1279,7 @@ void do_user_addr_fault(struct pt_regs *regs,
 			unsigned long address)
 {
 	struct mm_lock_range *range;
-	struct vm_area_struct *vma;
+	struct vm_area_struct pvma, *vma;
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	vm_fault_t fault, major = 0;
@@ -1423,6 +1423,23 @@ void do_user_addr_fault(struct pt_regs *regs,
 		return;
 	}
 
+	if (vma_is_anonymous(vma)) {
+		/*
+		 * Allocate anon_vma if needed.
+		 * This needs to operate on the vma of record.
+		 */
+		fault = prepare_mm_fault(vma, flags);
+		if (fault)
+			goto got_fault;
+
+		/*
+		 * Copy vma attributes into a pseudo-vma.
+		 * This will be required when using fine grained locks.
+		 */
+		pvma = *vma;
+		vma = &pvma;
+	}
+
 	/*
 	 * If for any reason at all we couldn't handle the fault,
 	 * make sure we exit gracefully rather than endlessly redo
@@ -1437,6 +1454,7 @@ void do_user_addr_fault(struct pt_regs *regs,
 	 * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
 	 */
 	fault = handle_mm_fault_range(vma, address, flags, range);
+got_fault:
 	major |= fault & VM_FAULT_MAJOR;
 
 	/*
--
2.25.0.341.g760bfbb309-goog
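
prepare_mm_fault() is introduced earlier in this series and is not shown
in this patch. Below is a minimal sketch of what such a helper could look
like, assuming it simply wraps anon_vma_prepare() and reports allocation
failure as VM_FAULT_OOM; this is an illustration only, not the actual
implementation from the series, and the flags argument is unused in the
sketch.

static inline vm_fault_t prepare_mm_fault(struct vm_area_struct *vma,
					   unsigned int flags)
{
	/*
	 * Sketch only (assumed shape of the helper): allocate
	 * vma->anon_vma up front, on the vma of record, so that the
	 * on-stack pseudo-vma copy made by the caller inherits a valid
	 * anon_vma pointer.
	 */
	if (!vma_is_anonymous(vma))
		return 0;

	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;

	return 0;
}

The point of calling this before the "pvma = *vma" copy is that
anon_vma_prepare() stores the allocated anon_vma in vma->anon_vma of the
vma that is actually linked into the mm; the pseudo-vma copy made
afterwards then carries that pointer into handle_mm_fault_range().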