Suren Baghdasaryan <surenb@xxxxxxxxxx> writes:

> migration_entry_wait does not need VMA lock, therefore it can be
> dropped before waiting.
>
> Signed-off-by: Suren Baghdasaryan <surenb@xxxxxxxxxx>
> ---
>  mm/memory.c | 14 ++++++++++++--
>  1 file changed, 12 insertions(+), 2 deletions(-)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index 5caaa4c66ea2..bdf46fdc58d6 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -3715,8 +3715,18 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>  	entry = pte_to_swp_entry(vmf->orig_pte);
>  	if (unlikely(non_swap_entry(entry))) {
>  		if (is_migration_entry(entry)) {
> -			migration_entry_wait(vma->vm_mm, vmf->pmd,
> -					     vmf->address);
> +			/* Save mm in case VMA lock is dropped */
> +			struct mm_struct *mm = vma->vm_mm;
> +
> +			if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
> +				/*
> +				 * No need to hold VMA lock for migration.
> +				 * WARNING: vma can't be used after this!
> +				 */
> +				vma_end_read(vma);
> +				ret |= VM_FAULT_COMPLETED;

Doesn't this need to also set FAULT_FLAG_LOCK_DROPPED to ensure we
don't call vma_end_read() again in __handle_mm_fault()?

> +			}
> +			migration_entry_wait(mm, vmf->pmd, vmf->address);
>  		} else if (is_device_exclusive_entry(entry)) {
>  			vmf->page = pfn_swap_entry_to_page(entry);
>  			ret = remove_device_exclusive_entry(vmf);
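
Concretely, the change I'm asking about would look something like the
following (a sketch only, not tested; it assumes FAULT_FLAG_LOCK_DROPPED
introduced earlier in this series is the intended way to record that the
VMA lock has already been released):

	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
		/*
		 * No need to hold VMA lock for migration.
		 * WARNING: vma can't be used after this!
		 */
		vma_end_read(vma);
		/*
		 * Record the drop so __handle_mm_fault() doesn't call
		 * vma_end_read() a second time on the way out.
		 */
		vmf->flags |= FAULT_FLAG_LOCK_DROPPED;
		ret |= VM_FAULT_COMPLETED;
	}
	migration_entry_wait(mm, vmf->pmd, vmf->address);

Otherwise, with FAULT_FLAG_VMA_LOCK still set but the lock already
released here, the common exit path would end up unlocking a VMA this
path no longer holds.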