From: "Liam R. Howlett" <Liam.Howlett@xxxxxxxxxx> __vma_link_file() resolves the mapping from the file, if there is one. Pass through the mapping and check the vm_file externally since most places already have the required information and check of vm_file. Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx> --- mm/mmap.c | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index 01985a0d664e..6a37f5d2e496 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -249,6 +249,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) if (brk < min_brk) goto out; + /* * Check against rlimit here. If this check is done later after the test * of oldbrk with newbrk then it can escape the test and let the data @@ -325,7 +326,6 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) if (populate) mm_populate(oldbrk, newbrk - oldbrk); return brk; - out: mmap_write_unlock(mm); return origbrk; @@ -457,21 +457,15 @@ static unsigned long count_vma_pages_range(struct mm_struct *mm, return nr_pages; } -static void __vma_link_file(struct vm_area_struct *vma) +static void __vma_link_file(struct vm_area_struct *vma, + struct address_space *mapping) { - struct file *file; - - file = vma->vm_file; - if (file) { - struct address_space *mapping = file->f_mapping; - - if (vma->vm_flags & VM_SHARED) - mapping_allow_writable(mapping); + if (vma->vm_flags & VM_SHARED) + mapping_allow_writable(mapping); - flush_dcache_mmap_lock(mapping); - vma_interval_tree_insert(vma, &mapping->i_mmap); - flush_dcache_mmap_unlock(mapping); - } + flush_dcache_mmap_lock(mapping); + vma_interval_tree_insert(vma, &mapping->i_mmap); + flush_dcache_mmap_unlock(mapping); } /* @@ -518,10 +512,11 @@ static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma) } vma_mas_store(vma, &mas); - __vma_link_file(vma); - if (mapping) + if (mapping) { + __vma_link_file(vma, mapping); i_mmap_unlock_write(mapping); + } mm->map_count++; validate_mm(mm); @@ -762,14 +757,14 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, uprobe_munmap(next, next->vm_start, next->vm_end); i_mmap_lock_write(mapping); - if (insert) { + if (insert && insert->vm_file) { /* * Put into interval tree now, so instantiated pages * are visible to arm/parisc __flush_dcache_page * throughout; but we cannot insert into address * space until vma start or end is updated. */ - __vma_link_file(insert); + __vma_link_file(insert, insert->vm_file->f_mapping); } } @@ -2982,7 +2977,6 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma, struct mm_struct *mm = current->mm; validate_mm_mt(mm); - /* * Check against address space limits by the changed size * Note: This happens *after* clearing old mappings in some code paths. @@ -3037,6 +3031,7 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma, mas_set_range(mas, vma->vm_start, addr + len - 1); mas_store_gfp(mas, vma, GFP_KERNEL); mm->map_count++; + out: perf_event_mmap(vma); mm->total_vm += len >> PAGE_SHIFT; -- 2.35.1