Since commit 70e806e4e645 ("mm: Do early cow for pinned pages during
fork() for ptes") pages under a FOLL_PIN will not be write protected
during COW for fork. This means that pages returned from
pin_user_pages(FOLL_WRITE) should not become write protected while the
pin is active.

However, there is a small race where get_user_pages_fast(FOLL_PIN) can
establish a FOLL_PIN at the same time copy_present_page() is write
protecting it:

        CPU 0                             CPU 1
   get_user_pages_fast()
    internal_get_user_pages_fast()
                                      copy_page_range()
                                        pte_alloc_map_lock()
                                          copy_present_page()
                                            atomic_read(has_pinned) == 0
                                            page_maybe_dma_pinned() == false
    atomic_set(has_pinned, 1);
    gup_pgd_range()
     gup_pte_range()
      pte_t pte = gup_get_pte(ptep)
      pte_access_permitted(pte)
      try_grab_compound_head()
                                            pte = maybe_mkwrite()
                                            set_pte_at();
                                        pte_unmap_unlock()
   // GUP now returns with a write protected page

The first attempt to resolve this by using the write protect caused
problems (and was missing a barrier), see commit f3c64eda3e50 ("mm:
avoid early COW write protect games during fork()").

Instead wrap copy_p4d_range() with the write side of something like a
seqcount and check the read side around gup_pgd_range(). If there is a
collision then get_user_pages_fast() fails and falls back to slow GUP.

Slow GUP is safe against this race because copy_page_range() is only
called while holding the write side of the mmap_lock on the src
mm_struct.

Fixes: f3c64eda3e50 ("mm: avoid early COW write protect games during fork()")
Suggested-by: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Link: https://lore.kernel.org/r/CAHk-=wi=iCnYCARbPGjkVJu9eyYeZ13N64tZYLdOB8CP5Q_PLw@xxxxxxxxxxxxxx
Signed-off-by: Jason Gunthorpe <jgg@xxxxxxxxxx>
---
 include/linux/mm_types.h |  6 ++++++
 kernel/fork.c            |  1 +
 mm/gup.c                 | 19 +++++++++++++++++++
 mm/memory.c              | 16 +++++++++++++++-
 4 files changed, 41 insertions(+), 1 deletion(-)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 5a9238f6caad97..8c7c9de476c4f8 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -446,6 +446,12 @@ struct mm_struct {
		 */
		atomic_t has_pinned;
 
+		/**
+		 * @write_protect_seq: Odd when any thread is write
+		 * protecting pages in this mm, for instance during fork().
+		 */
+		unsigned long write_protect_seq;
+
 #ifdef CONFIG_MMU
		atomic_long_t pgtables_bytes;	/* PTE page table pages */
 #endif
diff --git a/kernel/fork.c b/kernel/fork.c
index 32083db7a2a23e..342243f621c742 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1007,6 +1007,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
	mm->vmacache_seqnum = 0;
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
+	mm->write_protect_seq = 0;
	mmap_init_lock(mm);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->core_state = NULL;
diff --git a/mm/gup.c b/mm/gup.c
index ecbe1639ea2af7..2c1a1e0555479e 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2677,12 +2677,19 @@ static unsigned int lockless_pages_from_mm(unsigned long addr,
					   struct page **pages)
 {
	unsigned long flags;
+	unsigned long seq;
	int nr_pinned = 0;
 
	if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) ||
	    !gup_fast_permitted(addr, end))
		return 0;
 
+	if (gup_flags & FOLL_PIN) {
+		seq = smp_load_acquire(&current->mm->write_protect_seq);
+		if (seq & 1)
+			return 0;
+	}
+
	/*
	 * Disable interrupts. The nested form is used, in order to allow full,
	 * general purpose use of this routine.
@@ -2697,6 +2704,18 @@ static unsigned int lockless_pages_from_mm(unsigned long addr,
	local_irq_save(flags);
	gup_pgd_range(addr, end, gup_flags, pages, &nr_pinned);
	local_irq_restore(flags);
+
+	/*
+	 * When pinning pages for DMA there could be a concurrent write protect
+	 * from fork() via copy_page_range(), in this case always fail fast GUP.
+	 */
+	if (gup_flags & FOLL_PIN) {
+		smp_rmb();
+		if (READ_ONCE(current->mm->write_protect_seq) != seq) {
+			unpin_user_pages(pages, nr_pinned);
+			return 0;
+		}
+	}
	return nr_pinned;
 }
 
diff --git a/mm/memory.c b/mm/memory.c
index c48f8df6e50268..e2f959cce8563d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1171,6 +1171,17 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
					0, src_vma, src_mm, addr, end);
		mmu_notifier_invalidate_range_start(&range);
+		/*
+		 * This is like a seqcount where the mmap_lock provides
+		 * serialization for the write side. However, unlike seqcount
+		 * the read side falls back to obtaining the mmap_lock rather
+		 * than spinning. For this reason none of the preempt related
+		 * machinery in seqcount is desired here.
+		 */
+		mmap_assert_write_locked(src_mm);
+		WRITE_ONCE(src_mm->write_protect_seq,
+			   src_mm->write_protect_seq + 1);
+		smp_wmb();
	}
 
	ret = 0;
@@ -1187,8 +1198,11 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
		}
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
 
-	if (is_cow)
+	if (is_cow) {
+		smp_store_release(&src_mm->write_protect_seq,
+				  src_mm->write_protect_seq + 1);
		mmu_notifier_invalidate_range_end(&range);
+	}
	return ret;
 }
-- 
2.28.0
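
P.S. For anyone who wants to play with the ordering outside the kernel
tree, the protocol above is easy to model in plain C11. The sketch
below is illustrative only, not kernel code: struct mm, fork_side() and
gup_fast_side() are invented names, C11 fences stand in for the
kernel's smp_wmb()/smp_rmb(), the acquire load and release store stand
in for smp_load_acquire()/smp_store_release(), and it assumes (as the
patch does) that the write side is serialized, which the kernel gets
from holding the mmap_lock for write.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct mm {
	/* Even: no write protect in progress. Odd: fork() is mid flight. */
	atomic_ulong write_protect_seq;
};

/* Write side: models copy_page_range(), serialized by the mmap_lock. */
static void fork_side(struct mm *mm)
{
	unsigned long seq = atomic_load_explicit(&mm->write_protect_seq,
						 memory_order_relaxed);

	/* Enter: make the count odd before write protecting anything. */
	atomic_store_explicit(&mm->write_protect_seq, seq + 1,
			      memory_order_relaxed);
	/* Order the seq store before the PTE stores, like smp_wmb(). */
	atomic_thread_fence(memory_order_release);

	/* ... write protect the src page tables here ... */

	/* Exit: release store back to even, like smp_store_release(). */
	atomic_store_explicit(&mm->write_protect_seq, seq + 2,
			      memory_order_release);
}

/* Read side: models the FOLL_PIN path of lockless_pages_from_mm(). */
static bool gup_fast_side(struct mm *mm)
{
	/* Like smp_load_acquire(): odd means a fork is write protecting. */
	unsigned long seq = atomic_load_explicit(&mm->write_protect_seq,
						 memory_order_acquire);

	if (seq & 1)
		return false;

	/* ... locklessly walk the page tables and pin pages here ... */

	/* Order the PTE reads before re-reading seq, like smp_rmb(). */
	atomic_thread_fence(memory_order_acquire);
	if (atomic_load_explicit(&mm->write_protect_seq,
				 memory_order_relaxed) != seq) {
		/* Collided with fork(): unpin and fall back to slow GUP. */
		return false;
	}
	return true;
}

int main(void)
{
	struct mm mm = { .write_protect_seq = 0 };

	fork_side(&mm);			/* seq goes 0 -> 1 -> 2 */
	printf("fast GUP %s\n",
	       gup_fast_side(&mm) ? "succeeds" : "falls back to slow GUP");
	return 0;
}

The pairing the model shows is the whole trick: the writer makes the
count odd before touching any PTEs and even only after it is done, so a
reader that sees the same even value before and after its lockless walk
knows no write protection overlapped it; any other observation means a
collision, and the reader bails out to the slow path under the lock.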