Since we limit vm_refcnt to VMA_REF_LIMIT, which is smaller than
VMA_LOCK_OFFSET, there is no need to check again whether the
VMA_LOCK_OFFSET bit is set. Remove the extra check and add a
clarifying comment.

Fixes: e8f32ff00a66 ("mm: replace vm_lock and detached flag with a reference count")
Suggested-by: Wei Yang <richard.weiyang@xxxxxxxxx>
Signed-off-by: Suren Baghdasaryan <surenb@xxxxxxxxxx>
---
Applies over mm-unstable

 include/linux/mm.h | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 486638d22fc6..b5f262fc7dc5 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -747,7 +747,11 @@ static inline bool vma_start_read(struct mm_struct *mm, struct vm_area_struct *vma)
 		return false;
 
 	rwsem_acquire_read(&vma->vmlock_dep_map, 0, 0, _RET_IP_);
-	/* Limit at VMA_REF_LIMIT to leave one count for a writer */
+	/*
+	 * Limit at VMA_REF_LIMIT to leave one count for a writer.
+	 * If VMA_LOCK_OFFSET is set, __refcount_inc_not_zero_limited() will fail
+	 * because VMA_REF_LIMIT is less than VMA_LOCK_OFFSET.
+	 */
 	if (unlikely(!__refcount_inc_not_zero_limited(&vma->vm_refcnt, &oldcnt,
 						      VMA_REF_LIMIT))) {
 		rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
@@ -766,8 +770,7 @@ static inline bool vma_start_read(struct mm_struct *mm, struct vm_area_struct *vma)
 	 * after it has been unlocked.
 	 * This pairs with RELEASE semantics in vma_end_write_all().
 	 */
-	if (unlikely(oldcnt & VMA_LOCK_OFFSET ||
-		     vma->vm_lock_seq == raw_read_seqcount(&mm->mm_lock_seq))) {
+	if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&mm->mm_lock_seq))) {
 		vma_refcount_put(vma);
 		return false;
 	}

base-commit: f349e79bfbf3abfade8011797ff6d0d47b67dab7
-- 
2.47.1.613.gc27f4b7a9f-goog
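
The arithmetic behind the removed check can be demonstrated in plain
userspace C. The sketch below is an illustration, not kernel code: it
assumes the mm-unstable definitions VMA_LOCK_OFFSET = 0x40000000 and
VMA_REF_LIMIT = (VMA_LOCK_OFFSET - 1), and models the atomic
__refcount_inc_not_zero_limited() with a hypothetical non-atomic
stand-in, model_inc_not_zero_limited(), to show why a writer-locked
count can never pass the limit check.

#include <stdbool.h>
#include <stdio.h>

/* Constants as defined by the series in include/linux/mm.h. */
#define VMA_LOCK_OFFSET	0x40000000
#define VMA_REF_LIMIT	(VMA_LOCK_OFFSET - 1)

/*
 * Simplified, non-atomic stand-in for __refcount_inc_not_zero_limited():
 * refuse the increment when the count is zero (VMA detached) or when the
 * result would exceed the limit. The kernel version performs the same
 * arithmetic inside an atomic cmpxchg loop.
 */
static bool model_inc_not_zero_limited(int *refcnt, int *oldcnt, int limit)
{
	int old = *refcnt;

	if (old == 0 || old + 1 > limit)
		return false;
	*oldcnt = old;
	*refcnt = old + 1;
	return true;
}

int main(void)
{
	int refcnt, oldcnt;

	/* Reader-held VMA: small count, the increment succeeds. */
	refcnt = 2;
	printf("reader-held:   %s\n",
	       model_inc_not_zero_limited(&refcnt, &oldcnt, VMA_REF_LIMIT) ?
	       "acquired" : "failed");

	/*
	 * Writer-locked VMA: VMA_LOCK_OFFSET is folded into the count, so
	 * old + 1 > VMA_REF_LIMIT always holds and the increment fails.
	 * This is why a separate "oldcnt & VMA_LOCK_OFFSET" test is
	 * redundant.
	 */
	refcnt = VMA_LOCK_OFFSET + 1;
	printf("writer-locked: %s\n",
	       model_inc_not_zero_limited(&refcnt, &oldcnt, VMA_REF_LIMIT) ?
	       "acquired" : "failed");

	return 0;
}

Compiled and run, the sketch prints "acquired" for the reader case and
"failed" for the writer-locked case: once VMA_LOCK_OFFSET is folded into
the count, the limited increment already rejects the VMA, subsuming the
explicit bit test that this patch removes.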