Re: [PATCH] mm/mmap.c: Only call vma_unlock_anon_vma() when failure occurs in expand_upwards() and expand_downwards()

At present, I can use a git client via my 21cn mail address. I hope it
will be accepted by our mailing list.

Thanks.

On 9/1/15 21:49, Chen Gang wrote:
> 
> Sorry for the incorrect format of the patch. I have put the patch in the
> attachment, generated by "git format-patch -M HEAD^". Please help check
> it, thanks.
> 
> Next, I shall try to find another mail address that works both in China
> and with our mailing list.
> 
> Thanks.
> 
> On 9/1/15 04:54, Chen Gang wrote:
>> When a failure occurs, we need not call khugepaged_enter_vma_merge() or
>> validate_mm(), so branch to a common error exit that only unlocks the
>> anon vma.
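>>
>> The resulting shape in both functions is the usual single-exit error
>> path; roughly (a condensed sketch, not the exact kernel code):
>>
>> 	error = acct_stack_growth(vma, size, grow);
>> 	if (error)
>> 		goto err;
>> 	...
>> 	vma_unlock_anon_vma(vma);
>> 	khugepaged_enter_vma_merge(vma, vma->vm_flags);
>> 	validate_mm(vma->vm_mm);
>> 	return 0;
>> err:
>> 	vma_unlock_anon_vma(vma);
>> 	return error;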
>>
>> Also simplify do_munmap(): declare 'error' once at function scope
>> instead of twice in sub-blocks.
>>
>> Signed-off-by: Chen Gang <gang.chen.5i5j@xxxxxxxxx>
>> ---
>> mm/mmap.c | 116 +++++++++++++++++++++++++++++++-------------------------------
>> 1 file changed, 58 insertions(+), 58 deletions(-)
>>
>> diff --git a/mm/mmap.c b/mm/mmap.c
>> index df6d5f0..d32199a 100644
>> --- a/mm/mmap.c
>> +++ b/mm/mmap.c
>> @@ -2182,10 +2182,9 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
>>  	if (address < PAGE_ALIGN(address+4))
>>  		address = PAGE_ALIGN(address+4);
>>  	else {
>> -		vma_unlock_anon_vma(vma);
>> -		return -ENOMEM;
>> +		error = -ENOMEM;
>> +		goto err;
>>  	}
>> -	error = 0;
>>
>>  	/* Somebody else might have raced and expanded it already */
>>  	if (address > vma->vm_end) {
>> @@ -2194,38 +2193,39 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
>>  		size = address - vma->vm_start;
>>  		grow = (address - vma->vm_end) >> PAGE_SHIFT;
>>
>> -		error = -ENOMEM;
>> -		if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
>> -			error = acct_stack_growth(vma, size, grow);
>> -			if (!error) {
>> -				/*
>> -				 * vma_gap_update() doesn't support concurrent
>> -				 * updates, but we only hold a shared mmap_sem
>> -				 * lock here, so we need to protect against
>> -				 * concurrent vma expansions.
>> -				 * vma_lock_anon_vma() doesn't help here, as
>> -				 * we don't guarantee that all growable vmas
>> -				 * in a mm share the same root anon vma.
>> -				 * So, we reuse mm->page_table_lock to guard
>> -				 * against concurrent vma expansions.
>> -				 */
>> -				spin_lock(&vma->vm_mm->page_table_lock);
>> -				anon_vma_interval_tree_pre_update_vma(vma);
>> -				vma->vm_end = address;
>> -				anon_vma_interval_tree_post_update_vma(vma);
>> -				if (vma->vm_next)
>> -					vma_gap_update(vma->vm_next);
>> -				else
>> -					vma->vm_mm->highest_vm_end = address;
>> -				spin_unlock(&vma->vm_mm->page_table_lock);
>> -
>> -				perf_event_mmap(vma);
>> -			}
>> +		if (vma->vm_pgoff + (size >> PAGE_SHIFT) < vma->vm_pgoff) {
>> +			error = -ENOMEM;
>> +			goto err;
>>  		}
>> +		error = acct_stack_growth(vma, size, grow);
>> +		if (error)
>> +			goto err;
>> +		/*
>> +		 * vma_gap_update() doesn't support concurrent updates, but we
>> +		 * only hold a shared mmap_sem lock here, so we need to protect
>> +		 * against concurrent vma expansions. vma_lock_anon_vma()
>> +		 * doesn't help here, as we don't guarantee that all growable
>> +		 * vmas in a mm share the same root anon vma. So, we reuse mm->
>> +		 * page_table_lock to guard against concurrent vma expansions.
>> +		 */
>> +		spin_lock(&vma->vm_mm->page_table_lock);
>> +		anon_vma_interval_tree_pre_update_vma(vma);
>> +		vma->vm_end = address;
>> +		anon_vma_interval_tree_post_update_vma(vma);
>> +		if (vma->vm_next)
>> +			vma_gap_update(vma->vm_next);
>> +		else
>> +			vma->vm_mm->highest_vm_end = address;
>> +		spin_unlock(&vma->vm_mm->page_table_lock);
>> +
>> +		perf_event_mmap(vma);
>>  	}
>>  	vma_unlock_anon_vma(vma);
>>  	khugepaged_enter_vma_merge(vma, vma->vm_flags);
>>  	validate_mm(vma->vm_mm);
>> +	return 0;
>> +err:
>> +	vma_unlock_anon_vma(vma);
>>  	return error;
>>  }
>>  #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
>> @@ -2265,36 +2265,37 @@ int expand_downwards(struct vm_area_struct *vma,
>>  		size = vma->vm_end - address;
>>  		grow = (vma->vm_start - address) >> PAGE_SHIFT;
>>
>> -		error = -ENOMEM;
>> -		if (grow <= vma->vm_pgoff) {
>> -			error = acct_stack_growth(vma, size, grow);
>> -			if (!error) {
>> -				/*
>> -				 * vma_gap_update() doesn't support concurrent
>> -				 * updates, but we only hold a shared mmap_sem
>> -				 * lock here, so we need to protect against
>> -				 * concurrent vma expansions.
>> -				 * vma_lock_anon_vma() doesn't help here, as
>> -				 * we don't guarantee that all growable vmas
>> -				 * in a mm share the same root anon vma.
>> -				 * So, we reuse mm->page_table_lock to guard
>> -				 * against concurrent vma expansions.
>> -				 */
>> -				spin_lock(&vma->vm_mm->page_table_lock);
>> -				anon_vma_interval_tree_pre_update_vma(vma);
>> -				vma->vm_start = address;
>> -				vma->vm_pgoff -= grow;
>> -				anon_vma_interval_tree_post_update_vma(vma);
>> -				vma_gap_update(vma);
>> -				spin_unlock(&vma->vm_mm->page_table_lock);
>> -
>> -				perf_event_mmap(vma);
>> -			}
>> +		if (grow > vma->vm_pgoff) {
>> +			error = -ENOMEM;
>> +			goto err;
>>  		}
>> +		error = acct_stack_growth(vma, size, grow);
>> +		if (error)
>> +			goto err;
>> +		/*
>> +		 * vma_gap_update() doesn't support concurrent updates, but we
>> +		 * only hold a shared mmap_sem lock here, so we need to protect
>> +		 * against concurrent vma expansions. vma_lock_anon_vma()
>> +		 * doesn't help here, as we don't guarantee that all growable
>> +		 * vmas in a mm share the same root anon vma. So, we reuse mm->
>> +		 * page_table_lock to guard against concurrent vma expansions.
>> +		 */
>> +		spin_lock(&vma->vm_mm->page_table_lock);
>> +		anon_vma_interval_tree_pre_update_vma(vma);
>> +		vma->vm_start = address;
>> +		vma->vm_pgoff -= grow;
>> +		anon_vma_interval_tree_post_update_vma(vma);
>> +		vma_gap_update(vma);
>> +		spin_unlock(&vma->vm_mm->page_table_lock);
>> +
>> +		perf_event_mmap(vma);
>>  	}
>>  	vma_unlock_anon_vma(vma);
>>  	khugepaged_enter_vma_merge(vma, vma->vm_flags);
>>  	validate_mm(vma->vm_mm);
>> +	return 0;
>> +err:
>> +	vma_unlock_anon_vma(vma);
>>  	return error;
>>  }
>>
>> @@ -2542,6 +2543,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
>>  {
>>  	unsigned long end;
>>  	struct vm_area_struct *vma, *prev, *last;
>> +	int error;
>>
>>  	if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
>>  		return -EINVAL;
>> @@ -2570,8 +2572,6 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
>>  	 * places tmp vma above, and higher split_vma places tmp vma below.
>>  	 */
>>  	if (start > vma->vm_start) {
>> -		int error;
>> -
>>  		/*
>>  		 * Make sure that map_count on return from munmap() will
>>  		 * not exceed its limit; but let map_count go just above
>> @@ -2589,7 +2589,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
>>  	/* Does it split the last one? */
>>  	last = find_vma(mm, end);
>>  	if (last && end > last->vm_start) {
>> -		int error = __split_vma(mm, last, end, 1);
>> +		error = __split_vma(mm, last, end, 1);
>>  		if (error)
>>  			return error;
>>  	}
>> --
>> 1.9.3
>>
>>
>>
> 
> --
> Chen Gang
> 
> Open, share, and attitude like air, water, and life which God blessed
> 

-- 
Chen Gang

Open, share, and attitude like air, water, and life which God blessed
