On Wed, 11 Sep 2019 16:05:37 +0100

> +#define PMD_SHARE_DISABLE_THRESHOLD    (1 << 8)
> +
>  /*
>   * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
>   * and returns the corresponding pte. While this is not necessary for the
> @@ -4770,11 +4772,24 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
>         pte_t *spte = NULL;
>         pte_t *pte;
>         spinlock_t *ptl;
> +       static atomic_t timeout_cnt;
>
> -       if (!vma_shareable(vma, addr))
> -               return (pte_t *)pmd_alloc(mm, pud, addr);
> +       /*
> +        * Don't share if it is not sharable or locking attempt timed out
> +        * after 10ms. After 256 timeouts, PMD sharing will be permanently
> +        * disabled as it is just too slow.
> +        */
> +       if (!vma_shareable(vma, addr) ||
> +           (atomic_read(&timeout_cnt) >= PMD_SHARE_DISABLE_THRESHOLD))
> +               goto out_no_share;
> +
> +       if (!i_mmap_timedlock_write(mapping, ms_to_ktime(10))) {
> +               if (atomic_inc_return(&timeout_cnt) ==
> +                   PMD_SHARE_DISABLE_THRESHOLD)
> +                       pr_info("Hugetlbfs PMD sharing disabled because of timeouts!\n");
> +               goto out_no_share;
> +       }

        atomic_dec_if_positive(&timeout_cnt);

Leaving the choice of 256 as the threshold aside, the logic that permanently
disables PMD sharing does not make much sense without something like the
atomic_dec above on the successful-lock path. As posted, the 256 timeouts
need not come from sustained contention: sporadic timeouts accumulated over
a long uptime will eventually reach the threshold and disable sharing for
good.

>
> -       i_mmap_lock_write(mapping);
>         vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
>                 if (svma == vma)
>                         continue;
> @@ -4806,6 +4821,9 @@
>         pte = (pte_t *)pmd_alloc(mm, pud, addr);
>         i_mmap_unlock_write(mapping);
>         return pte;
> +
> +out_no_share:
> +       return (pte_t *)pmd_alloc(mm, pud, addr);
>  }
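To make the suggested semantics concrete, here is a minimal userspace sketch
of the same scheme: a timed write-lock attempt, a shared failure counter that
permanently disables the fast path at a threshold, and a decay on every
success so that only sustained contention can trip it. Everything in it is
illustrative, not kernel code: pthread_rwlock_timedwrlock() stands in for the
patch's i_mmap_timedlock_write(), and the names try_share, dec_if_positive,
map_lock and DISABLE_THRESHOLD are made up for the example.

/*
 * Userspace sketch of "timed lock + counted timeouts + decay on success".
 * Mirrors the patch's constants (10ms timeout, threshold of 256) plus the
 * atomic_dec_if_positive() suggested above.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define DISABLE_THRESHOLD (1 << 8)      /* 256, mirroring the patch */

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
static atomic_int timeout_cnt;          /* zero-initialized */

/* Userspace stand-in for the kernel's atomic_dec_if_positive(). */
static void dec_if_positive(atomic_int *v)
{
        int old = atomic_load(v);

        /* CAS loop: only decrement while the value is still positive. */
        while (old > 0 && !atomic_compare_exchange_weak(v, &old, old - 1))
                ;       /* a failed CAS refreshes 'old'; retry */
}

/* Returns true if the lock was taken and sharing may be attempted. */
static bool try_share(void)
{
        struct timespec deadline;

        /* Bail out permanently once too many timeouts have accumulated. */
        if (atomic_load(&timeout_cnt) >= DISABLE_THRESHOLD)
                return false;

        /* pthread_rwlock_timedwrlock() takes an absolute CLOCK_REALTIME time. */
        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_nsec += 10 * 1000 * 1000;   /* 10ms, as in the patch */
        if (deadline.tv_nsec >= 1000000000L) {
                deadline.tv_sec += 1;
                deadline.tv_nsec -= 1000000000L;
        }

        if (pthread_rwlock_timedwrlock(&map_lock, &deadline) != 0) {
                /* Count the timeout; report once when the threshold is hit. */
                if (atomic_fetch_add(&timeout_cnt, 1) + 1 == DISABLE_THRESHOLD)
                        fprintf(stderr, "sharing disabled: too many timeouts\n");
                return false;
        }

        /*
         * Success: decay the counter, so only sustained contention -- not
         * sporadic timeouts spread over a long uptime -- can ever reach
         * DISABLE_THRESHOLD. This is the point of the comment above.
         */
        dec_if_positive(&timeout_cnt);
        return true;    /* caller does its work, then unlocks map_lock */
}

int main(void)
{
        if (try_share()) {
                /* ... the work done under the lock would go here ... */
                pthread_rwlock_unlock(&map_lock);
        }
        return 0;
}

Build with e.g. "cc -pthread sketch.c". With the decay in place the counter
tracks recent rather than lifetime failures: 256 isolated timeouts spread
over weeks never disable the fast path, while 256 net failures in a burst
still do.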