On 1/28/21 2:48 PM, Axel Rasmussen wrote: > From: Peter Xu <peterx@xxxxxxxxxx> > > Huge pmd sharing for hugetlbfs is racy with userfaultfd-wp because > userfaultfd-wp is always based on pgtable entries, so they cannot be shared. > > Walk the hugetlb range and unshare all such mappings if there is, right before > UFFDIO_REGISTER will succeed and return to userspace. > > This will pair with want_pmd_share() in hugetlb code so that huge pmd sharing > is completely disabled for userfaultfd-wp registered range. > > Signed-off-by: Peter Xu <peterx@xxxxxxxxxx> > Signed-off-by: Axel Rasmussen <axelrasmussen@xxxxxxxxxx> > --- > fs/userfaultfd.c | 45 ++++++++++++++++++++++++++++++++++++ > include/linux/mmu_notifier.h | 1 + > 2 files changed, 46 insertions(+) > > diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c > index 894cc28142e7..2c6706ac2504 100644 > --- a/fs/userfaultfd.c > +++ b/fs/userfaultfd.c > @@ -15,6 +15,7 @@ > #include <linux/sched/signal.h> > #include <linux/sched/mm.h> > #include <linux/mm.h> > +#include <linux/mmu_notifier.h> > #include <linux/poll.h> > #include <linux/slab.h> > #include <linux/seq_file.h> > @@ -1190,6 +1191,47 @@ static ssize_t userfaultfd_read(struct file *file, char __user *buf, > } > } > > +/* > + * This function will unconditionally remove all the shared pmd pgtable entries > + * within the specific vma for a hugetlbfs memory range. > + */ > +static void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) > +{ > +#ifdef CONFIG_HUGETLB_PAGE > + struct hstate *h = hstate_vma(vma); > + unsigned long sz = huge_page_size(h); > + struct mm_struct *mm = vma->vm_mm; > + struct mmu_notifier_range range; > + unsigned long address; > + spinlock_t *ptl; > + pte_t *ptep; > + Perhaps we should add a quick check to see if vma is sharable. Might be as simple as !(vma->vm_flags & VM_MAYSHARE). I see a comment/question in a later patch about only doing minor fault processing on shared mappings. 
Code below looks fine, but it would be a waste to do all that for a vma that could not be shared. -- Mike Kravetz > + /* > + * No need to call adjust_range_if_pmd_sharing_possible(), because > + * we're going to operate on the whole vma > + */ > + mmu_notifier_range_init(&range, MMU_NOTIFY_HUGETLB_UNSHARE, > + 0, vma, mm, vma->vm_start, vma->vm_end); > + mmu_notifier_invalidate_range_start(&range); > + i_mmap_lock_write(vma->vm_file->f_mapping); > + for (address = vma->vm_start; address < vma->vm_end; address += sz) { > + ptep = huge_pte_offset(mm, address, sz); > + if (!ptep) > + continue; > + ptl = huge_pte_lock(h, mm, ptep); > + huge_pmd_unshare(mm, vma, &address, ptep); > + spin_unlock(ptl); > + } > + flush_hugetlb_tlb_range(vma, vma->vm_start, vma->vm_end); > + i_mmap_unlock_write(vma->vm_file->f_mapping); > + /* > + * No need to call mmu_notifier_invalidate_range(), see > + * Documentation/vm/mmu_notifier.rst. > + */ > + mmu_notifier_invalidate_range_end(&range); > +#endif > +} > + > static void __wake_userfault(struct userfaultfd_ctx *ctx, > struct userfaultfd_wake_range *range) > { > @@ -1448,6 +1490,9 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, > vma->vm_flags = new_flags; > vma->vm_userfaultfd_ctx.ctx = ctx; > > + if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma)) > + hugetlb_unshare_all_pmds(vma); > + > skip: > prev = vma; > start = vma->vm_end; > diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h > index b8200782dede..ff50c8528113 100644 > --- a/include/linux/mmu_notifier.h > +++ b/include/linux/mmu_notifier.h > @@ -51,6 +51,7 @@ enum mmu_notifier_event { > MMU_NOTIFY_SOFT_DIRTY, > MMU_NOTIFY_RELEASE, > MMU_NOTIFY_MIGRATE, > + MMU_NOTIFY_HUGETLB_UNSHARE, > }; > > #define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0) >