Commit c0d0381ade79 added code to take i_mmap_rwsem in read mode during
fault processing. However, this was observed to increase fault
processing time by approximately 33%. Technically, i_mmap_rwsem only
needs to be held when pmd sharing is possible. pmd sharing depends on
mapping flags, alignment and size. The routine vma_shareable() already
checks these conditions. Therefore, use vma_shareable() to determine if
sharing is possible and, hence, whether taking i_mmap_rwsem is
necessary. This is done during fault processing and vma copying. Code
in memory-failure, page migration and userfaultfd continues to always
take i_mmap_rwsem, as those paths are not as performance sensitive.

Reported-by: kernel test robot <rong.a.chen@xxxxxxxxx>
Signed-off-by: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
---
 mm/hugetlb.c | 96 ++++++++++++++++++++++++++++++++++------------------
 1 file changed, 63 insertions(+), 33 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5349beda3658..6e9085464e78 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3656,6 +3656,21 @@ static int hugetlb_acct_memory(struct hstate *h, long delta)
 	return ret;
 }
 
+#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
+{
+	unsigned long base = addr & PUD_MASK;
+	unsigned long end = base + PUD_SIZE;
+
+	/*
+	 * check on proper vm_flags and page table alignment
+	 */
+	if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
+		return true;
+	return false;
+}
+#endif
+
 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
 {
 	struct resv_map *resv = vma_resv_map(vma);
@@ -3807,6 +3822,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 	unsigned long sz = huge_page_size(h);
 	struct address_space *mapping = vma->vm_file->f_mapping;
 	struct mmu_notifier_range range;
+	bool i_mmap_rwsem_held = false;
 	int ret = 0;
 
 	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
@@ -3816,14 +3832,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 					vma->vm_start,
 					vma->vm_end);
 		mmu_notifier_invalidate_range_start(&range);
-	} else {
-		/*
-		 * For shared mappings i_mmap_rwsem must be held to call
-		 * huge_pte_alloc, otherwise the returned ptep could go
-		 * away if part of a shared pmd and another thread calls
-		 * huge_pmd_unshare.
-		 */
-		i_mmap_lock_read(mapping);
 	}
 
 	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
@@ -3831,6 +3839,28 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 		src_pte = huge_pte_offset(src, addr, sz);
 		if (!src_pte)
 			continue;
+
+		/*
+		 * For shared mappings (non-cow) i_mmap_rwsem must be held to
+		 * call huge_pte_alloc, otherwise the returned ptep could go
+		 * away if part of a shared pmd and another thread calls
+		 * huge_pmd_unshare.  This is only necessary if the specific
+		 * pmd can be shared.  Acquire/drop semaphore as necessary.
+		 */
+		if (!cow) {
+			if (!i_mmap_rwsem_held) {
+				if (vma_shareable(vma, addr)) {
+					i_mmap_lock_read(mapping);
+					i_mmap_rwsem_held = true;
+				}
+			} else {
+				if (!vma_shareable(vma, addr)) {
+					i_mmap_unlock_read(mapping);
+					i_mmap_rwsem_held = false;
+				}
+			}
+		}
+
 		dst_pte = huge_pte_alloc(dst, addr, sz);
 		if (!dst_pte) {
 			ret = -ENOMEM;
@@ -3901,7 +3931,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 
 	if (cow)
 		mmu_notifier_invalidate_range_end(&range);
-	else
+	if (i_mmap_rwsem_held)
 		i_mmap_unlock_read(mapping);
 
 	return ret;
@@ -4357,9 +4387,11 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 			 */
 			hash = hugetlb_fault_mutex_hash(mapping, idx);
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
-			i_mmap_unlock_read(mapping);
+			if (vma_shareable(vma, haddr))
+				i_mmap_unlock_read(mapping);
 			ret = handle_userfault(&vmf, VM_UFFD_MISSING);
-			i_mmap_lock_read(mapping);
+			if (vma_shareable(vma, haddr))
+				i_mmap_lock_read(mapping);
 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
 			goto out;
 		}
@@ -4543,19 +4575,22 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	/*
-	 * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold
-	 * until finished with ptep.  This prevents huge_pmd_unshare from
-	 * being called elsewhere and making the ptep no longer valid.
+	 * If PMD sharing is possible, acquire i_mmap_rwsem before calling
+	 * huge_pte_alloc and hold until finished with ptep.  This prevents
+	 * huge_pmd_unshare from being called elsewhere and making the ptep
+	 * no longer valid.
 	 *
 	 * ptep could have already be assigned via huge_pte_offset.  That
 	 * is OK, as huge_pte_alloc will return the same value unless
 	 * something has changed.
 	 */
 	mapping = vma->vm_file->f_mapping;
-	i_mmap_lock_read(mapping);
+	if (vma_shareable(vma, haddr))
+		i_mmap_lock_read(mapping);
 	ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
 	if (!ptep) {
-		i_mmap_unlock_read(mapping);
+		if (vma_shareable(vma, haddr))
+			i_mmap_unlock_read(mapping);
 		return VM_FAULT_OOM;
 	}
 
@@ -4652,7 +4687,8 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 out_mutex:
 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
-	i_mmap_unlock_read(mapping);
+	if (vma_shareable(vma, haddr))
+		i_mmap_unlock_read(mapping);
 	/*
 	 * Generally it's safe to hold refcount during waiting page lock. But
 	 * here we just wait to defer the next page fault to avoid busy loop and
@@ -5287,19 +5323,6 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma,
 	return saddr;
 }
 
-static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
-{
-	unsigned long base = addr & PUD_MASK;
-	unsigned long end = base + PUD_SIZE;
-
-	/*
-	 * check on proper vm_flags and page table alignment
-	 */
-	if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
-		return true;
-	return false;
-}
-
 /*
  * Determine if start,end range within vma could be mapped by shared pmd.
  * If yes, adjust start and end to cover range associated with possible
@@ -5335,9 +5358,12 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
  * !shared pmd case because we can allocate the pmd later as well, it makes the
  * code much cleaner.
  *
- * This routine must be called with i_mmap_rwsem held in at least read mode.
- * For hugetlbfs, this prevents removal of any page table entries associated
- * with the address space.  This is important as we are setting up sharing
+ * FIXME - If sharing is possible, this routine must be called with
+ * i_mmap_rwsem held in at least read mode.  Leaving it up to the caller
+ * to determine if sharing is possible is asking for trouble.  Right now, all
+ * calling code is correct.  But, this needs to be cleaner.  Holding
+ * i_mmap_rwsem prevents removal of any page table entries associated with
+ * the address space.  This is important as we are setting up sharing
  * based on existing page table entries (mappings).
  */
 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
@@ -5355,6 +5381,10 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 	if (!vma_shareable(vma, addr))
 		return (pte_t *)pmd_alloc(mm, pud, addr);
 
+	/*
+	 * If we get here, caller should have acquired i_mmap_rwsem in
+	 * at least read mode.
+	 */
 	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
 		if (svma == vma)
 			continue;
-- 
2.25.4
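P.S. For readers who want to see the locking shape from copy_hugetlb_page_range()
in isolation: below is a minimal userspace sketch of the same
acquire/drop-as-you-cross-regions idea. Everything in it is an illustrative
stand-in, not a kernel symbol: shareable() plays the role of vma_shareable(),
a pthread rwlock plays the role of i_mmap_rwsem, and REGION_SIZE plays the
role of PUD_SIZE.

/*
 * Userspace analogue of the conditional i_mmap_rwsem handling in the
 * patch above.  All names here are made up for illustration.
 * Build with:  cc -pthread demo.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define REGION_SIZE	(1UL << 20)	/* stand-in for PUD_SIZE */

static pthread_rwlock_t rwsem = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in predicate: pretend only the first region can be shared. */
static bool shareable(unsigned long addr)
{
	return (addr & ~(REGION_SIZE - 1)) == 0;
}

static void process(unsigned long addr)
{
	/* Work that must not race with "unsharing" while the lock is held. */
	printf("processing %#lx (shareable: %d)\n", addr, shareable(addr));
}

int main(void)
{
	bool held = false;
	unsigned long addr;

	for (addr = 0; addr < 4 * REGION_SIZE; addr += REGION_SIZE / 2) {
		/*
		 * Same shape as the patch: take the lock lazily when the
		 * walk enters a range where sharing is possible, and drop
		 * it when leaving one, instead of holding it for the
		 * entire walk.
		 */
		if (!held) {
			if (shareable(addr)) {
				pthread_rwlock_rdlock(&rwsem);
				held = true;
			}
		} else if (!shareable(addr)) {
			pthread_rwlock_unlock(&rwsem);
			held = false;
		}
		process(addr);
	}
	if (held)
		pthread_rwlock_unlock(&rwsem);	/* mirror of the exit-path unlock */
	return 0;
}

This is why the vma copying path keeps the i_mmap_rwsem_held boolean: the
answer from vma_shareable() can change from one PUD-sized range to the next,
so the semaphore is only held across the stretches where huge_pmd_unshare()
could actually invalidate the ptep.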