When a vma is known, avoid calling mm_populate() to search for the vma to
populate.

Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx>
---
 mm/gup.c      | 20 ++++++++++++++++++++
 mm/internal.h |  4 ++++
 2 files changed, 24 insertions(+)

diff --git a/mm/gup.c b/mm/gup.c
index c3a17b189064..48fe98ab0729 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1468,6 +1468,26 @@ long populate_vma_page_range(struct vm_area_struct *vma,
 				NULL, NULL, locked);
 }
 
+/**
+ * mm_populate_vma() - Populate a single range in a single vma.
+ * @vma: The vma to populate.
+ * @start: The start address to populate.
+ * @end: The end address to stop populating.
+ *
+ * Note: Ignores errors.  Takes and drops the mmap read lock internally;
+ * populate_vma_page_range() may release it early, hence the locked check.
+ */
+void mm_populate_vma(struct vm_area_struct *vma, unsigned long start,
+		unsigned long end)
+{
+	struct mm_struct *mm = current->mm;
+	int locked = 1;
+
+	mmap_read_lock(mm);
+	populate_vma_page_range(vma, start, end, &locked);
+	if (locked)
+		mmap_read_unlock(mm);
+}
+
 /*
  * __mm_populate - populate and/or mlock pages within a range of address space.
  *
diff --git a/mm/internal.h b/mm/internal.h
index 7ad55938d391..583f2f1e6ff8 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -346,6 +346,10 @@ static inline bool is_data_mapping(vm_flags_t flags)
 	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
 }
 
+/* mm/gup.c */
+extern void mm_populate_vma(struct vm_area_struct *vma, unsigned long start,
+		unsigned long end);
+
 /* Maple tree operations using VMAs */
 /*
  * vma_mas_store() - Store a VMA in the maple tree.
-- 
2.30.2