When mmap_sem is moved to a range lock, some assertions done in the code
will no longer be valid, such as the ones ensuring that mmap_sem is held.

This patch should be reverted later, and some of these checks might be
reviewed once the range locking API provides dedicated services.

Signed-off-by: Laurent Dufour <ldufour@xxxxxxxxxxxxxxxxxx>
---
 arch/x86/events/core.c  |  1 -
 fs/userfaultfd.c        |  6 ------
 include/linux/huge_mm.h |  2 --
 mm/gup.c                |  1 -
 mm/memory.c             | 12 +++---------
 mm/pagewalk.c           |  3 ---
 6 files changed, 3 insertions(+), 22 deletions(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 580b60f5ac83..86beb42376b8 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2120,7 +2120,6 @@ static void x86_pmu_event_mapped(struct perf_event *event)
 	 * For now, this can't happen because all callers hold mmap_sem
 	 * for write. If this changes, we'll need a different solution.
 	 */
-	lockdep_assert_held_exclusive(&current->mm->mmap_sem);
 
 	if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
 		on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index b83117741b11..5752b3b65638 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -222,8 +222,6 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
 	pte_t *pte;
 	bool ret = true;
 
-	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
-
 	pte = huge_pte_offset(mm, address);
 	if (!pte)
 		goto out;
@@ -271,8 +269,6 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
 	pte_t *pte;
 	bool ret = true;
 
-	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
-
 	pgd = pgd_offset(mm, address);
 	if (!pgd_present(*pgd))
 		goto out;
@@ -340,8 +336,6 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	bool must_wait, return_to_userland;
 	long blocking_state;
 
-	BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
-
 	ret = VM_FAULT_SIGBUS;
 	ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
 	if (!ctx)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index a3762d49ba39..d400014892c7 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -161,7 +161,6 @@ extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
 static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
 		struct vm_area_struct *vma)
 {
-	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
 	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
 		return __pmd_trans_huge_lock(pmd, vma);
 	else
@@ -170,7 +169,6 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
 static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
 		struct vm_area_struct *vma)
 {
-	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
 	if (pud_trans_huge(*pud) || pud_devmap(*pud))
 		return __pud_trans_huge_lock(pud, vma);
 	else
diff --git a/mm/gup.c b/mm/gup.c
index b83b47804c6e..ad83cfa38649 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1040,7 +1040,6 @@ long populate_vma_page_range(struct vm_area_struct *vma,
 	VM_BUG_ON(end & ~PAGE_MASK);
 	VM_BUG_ON_VMA(start < vma->vm_start, vma);
 	VM_BUG_ON_VMA(end > vma->vm_end, vma);
-	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
 
 	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
 	if (vma->vm_flags & VM_LOCKONFAULT)
diff --git a/mm/memory.c b/mm/memory.c
index 745acb75b3b4..9adb7d4396bf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1298,8 +1298,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 		next = pmd_addr_end(addr, end);
 		if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
 			if (next - addr != HPAGE_PMD_SIZE) {
-				VM_BUG_ON_VMA(vma_is_anonymous(vma) &&
-				    !rwsem_is_locked(&tlb->mm->mmap_sem), vma);
+				VM_BUG_ON_VMA(vma_is_anonymous(vma), vma);
 				__split_huge_pmd(vma, pmd, addr, false, NULL);
 			} else if (zap_huge_pmd(tlb, vma, pmd, addr))
 				goto next;
@@ -1334,10 +1333,9 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
 	do {
 		next = pud_addr_end(addr, end);
 		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
-			if (next - addr != HPAGE_PUD_SIZE) {
-				VM_BUG_ON_VMA(!rwsem_is_locked(&tlb->mm->mmap_sem), vma);
+			if (next - addr != HPAGE_PUD_SIZE)
 				split_huge_pud(vma, pud, addr);
-			} else if (zap_huge_pud(tlb, vma, pud, addr))
+			else if (zap_huge_pud(tlb, vma, pud, addr))
 				goto next;
 			/* fall through */
 		}
@@ -4305,10 +4303,6 @@ void __might_fault(const char *file, int line)
 	if (pagefault_disabled())
 		return;
 	__might_sleep(file, line, 0);
-#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
-	if (current->mm)
-		might_lock_read(&current->mm->mmap_sem);
-#endif
 }
 EXPORT_SYMBOL(__might_fault);
 #endif
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 60f7856e508f..13429c7815c9 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -293,8 +293,6 @@ int walk_page_range(unsigned long start, unsigned long end,
 	if (!walk->mm)
 		return -EINVAL;
 
-	VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);
-
 	vma = find_vma(walk->mm, start);
 	do {
 		if (!vma) { /* after the last vma */
@@ -336,7 +334,6 @@ int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk)
 	if (!walk->mm)
 		return -EINVAL;
 
-	VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
 	VM_BUG_ON(!vma);
 	walk->vma = vma;
 	err = walk_page_test(vma->vm_start, vma->vm_end, walk);
-- 
2.7.4
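
For reference, every check removed by this patch is built on one of the two
whole-rwsem idioms gathered in the sketch below. This is illustrative only:
the wrapper names are made up (they are not existing kernel helpers), while
the assertion bodies are the exact calls being dropped. Neither idiom has a
meaningful equivalent once mmap_sem is no longer a single rwsem covering the
whole address space, which is why the patch removes them rather than
converting them.

/*
 * Illustrative sketch, not part of the patch.  The helper names below
 * are hypothetical; only the assertion bodies are taken from the code
 * removed above.
 */
#include <linux/lockdep.h>
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/rwsem.h>

/* Debug-build check: the whole mmap_sem rwsem is held (read or write). */
static inline void mmap_sem_assert_held(struct mm_struct *mm)
{
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
}

/* Lockdep check: the whole mmap_sem rwsem is held for write. */
static inline void mmap_sem_assert_held_write(struct mm_struct *mm)
{
	lockdep_assert_held_exclusive(&mm->mmap_sem);
}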