From: "Liam R. Howlett" <Liam.Howlett@xxxxxxxxxx> Set the start and end address for munmap when the prev and next are gathered. This is needed to avoid incorrect addresses being used during the vms_complete_munmap_vmas() function if the prev/next vma are expanded. Add a new helper vms_complete_pte_clear(), which is needed later and will avoid growing the argument list to unmap_region() beyond the 9 it already has. Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@xxxxxxxxxx> --- mm/internal.h | 2 ++ mm/mmap.c | 37 ++++++++++++++++++++++++++++++------- 2 files changed, 32 insertions(+), 7 deletions(-) diff --git a/mm/internal.h b/mm/internal.h index 02627e269d6b..ec8441362c28 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -1493,6 +1493,8 @@ struct vma_munmap_struct { struct list_head *uf; /* Userfaultfd list_head */ unsigned long start; /* Aligned start addr (inclusive) */ unsigned long end; /* Aligned end addr (exclusive) */ + unsigned long unmap_start; /* Unmap PTE start */ + unsigned long unmap_end; /* Unmap PTE end */ int vma_count; /* Number of vmas that will be removed */ unsigned long nr_pages; /* Number of pages being removed */ unsigned long locked_vm; /* Number of locked pages */ diff --git a/mm/mmap.c b/mm/mmap.c index b940de8c6df8..7cc1f47122f6 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -530,6 +530,8 @@ static inline void init_vma_munmap(struct vma_munmap_struct *vms, vms->vma_count = 0; vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0; vms->exec_vm = vms->stack_vm = vms->data_vm = 0; + vms->unmap_start = FIRST_USER_ADDRESS; + vms->unmap_end = USER_PGTABLES_CEILING; } /* @@ -2612,6 +2614,29 @@ static inline void abort_munmap_vmas(struct ma_state *mas_detach) __mt_destroy(mas_detach->tree); } + +static void vms_complete_pte_clear(struct vma_munmap_struct *vms, + struct ma_state *mas_detach, bool mm_wr_locked) +{ + struct mmu_gather tlb; + + /* + * We can free page tables without write-locking mmap_lock because VMAs + * were isolated before we downgraded mmap_lock. + */ + mas_set(mas_detach, 1); + lru_add_drain(); + tlb_gather_mmu(&tlb, vms->mm); + update_hiwater_rss(vms->mm); + unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end, + vms->vma_count, mm_wr_locked); + mas_set(mas_detach, 1); + /* start and end may be different if there is no prev or next vma. */ + free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start, + vms->unmap_end, mm_wr_locked); + tlb_finish_mmu(&tlb); +} + /* * vms_complete_munmap_vmas() - Finish the munmap() operation * @vms: The vma munmap struct @@ -2633,13 +2658,7 @@ static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms, if (vms->unlock) mmap_write_downgrade(mm); - /* - * We can free page tables without write-locking mmap_lock because VMAs - * were isolated before we downgraded mmap_lock. - */ - mas_set(mas_detach, 1); - unmap_region(mm, mas_detach, vms->vma, vms->prev, vms->next, - vms->start, vms->end, vms->vma_count, !vms->unlock); + vms_complete_pte_clear(vms, mas_detach, !vms->unlock); /* Update high watermark before we lower total_vm */ update_hiwater_vm(mm); /* Stat accounting */ @@ -2701,6 +2720,8 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms, goto start_split_failed; } vms->prev = vma_prev(vms->vmi); + if (vms->prev) + vms->unmap_start = vms->prev->vm_end; /* * Detach a range of VMAs from the mm. 
 mm/internal.h |  2 ++
 mm/mmap.c     | 37 ++++++++++++++++++++++++++++++-------
 2 files changed, 32 insertions(+), 7 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index 02627e269d6b..ec8441362c28 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1493,6 +1493,8 @@ struct vma_munmap_struct {
 	struct list_head *uf;		/* Userfaultfd list_head */
 	unsigned long start;		/* Aligned start addr (inclusive) */
 	unsigned long end;		/* Aligned end addr (exclusive) */
+	unsigned long unmap_start;	/* Unmap PTE start */
+	unsigned long unmap_end;	/* Unmap PTE end */
 	int vma_count;			/* Number of vmas that will be removed */
 	unsigned long nr_pages;		/* Number of pages being removed */
 	unsigned long locked_vm;	/* Number of locked pages */
diff --git a/mm/mmap.c b/mm/mmap.c
index b940de8c6df8..7cc1f47122f6 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -530,6 +530,8 @@ static inline void init_vma_munmap(struct vma_munmap_struct *vms,
 	vms->vma_count = 0;
 	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
 	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
+	vms->unmap_start = FIRST_USER_ADDRESS;
+	vms->unmap_end = USER_PGTABLES_CEILING;
 }
 
 /*
@@ -2612,6 +2614,29 @@ static inline void abort_munmap_vmas(struct ma_state *mas_detach)
 	__mt_destroy(mas_detach->tree);
 }
 
+
+static void vms_complete_pte_clear(struct vma_munmap_struct *vms,
+		struct ma_state *mas_detach, bool mm_wr_locked)
+{
+	struct mmu_gather tlb;
+
+	/*
+	 * We can free page tables without write-locking mmap_lock because VMAs
+	 * were isolated before we downgraded mmap_lock.
+	 */
+	mas_set(mas_detach, 1);
+	lru_add_drain();
+	tlb_gather_mmu(&tlb, vms->mm);
+	update_hiwater_rss(vms->mm);
+	unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
+		   vms->vma_count, mm_wr_locked);
+	mas_set(mas_detach, 1);
+	/* start and end may be different if there is no prev or next vma. */
+	free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start,
+		      vms->unmap_end, mm_wr_locked);
+	tlb_finish_mmu(&tlb);
+}
+
 /*
  * vms_complete_munmap_vmas() - Finish the munmap() operation
  * @vms: The vma munmap struct
@@ -2633,13 +2658,7 @@ static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
 	if (vms->unlock)
 		mmap_write_downgrade(mm);
 
-	/*
-	 * We can free page tables without write-locking mmap_lock because VMAs
-	 * were isolated before we downgraded mmap_lock.
-	 */
-	mas_set(mas_detach, 1);
-	unmap_region(mm, mas_detach, vms->vma, vms->prev, vms->next,
-		     vms->start, vms->end, vms->vma_count, !vms->unlock);
+	vms_complete_pte_clear(vms, mas_detach, !vms->unlock);
 	/* Update high watermark before we lower total_vm */
 	update_hiwater_vm(mm);
 	/* Stat accounting */
@@ -2701,6 +2720,8 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
 			goto start_split_failed;
 	}
 	vms->prev = vma_prev(vms->vmi);
+	if (vms->prev)
+		vms->unmap_start = vms->prev->vm_end;
 
 	/*
 	 * Detach a range of VMAs from the mm. Using next as a temp variable as
@@ -2763,6 +2784,8 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
 	}
 	vms->next = vma_next(vms->vmi);
+	if (vms->next)
+		vms->unmap_end = vms->next->vm_start;
 
 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
 	/* Make sure no VMAs are about to be lost. */
-- 
2.43.0