On Fri, Dec 13, 2024 at 02:16:51PM -0800, Matthew Brost wrote:
> On Fri, Nov 29, 2024 at 10:31:32AM +1100, Alistair Popple wrote:
> >
> > Matthew Brost <matthew.brost@xxxxxxxxx> writes:
> >
> > > Avoid multiple CPU page faults to the same device page racing by trying
> > > to lock the page in do_swap_page before taking an extra reference to the
> > > page. This prevents scenarios where multiple CPU page faults each take
> > > an extra reference to a device page, which could abort migration in
> > > folio_migrate_mapping. With the device page being locked in
> > > do_swap_page, the migrate_vma_* functions need to be updated to avoid
> > > locking the fault_page argument.
> > >
> > > Prior to this change, a livelock scenario could occur in Xe's (Intel GPU
> > > DRM driver) SVM implementation if enough threads faulted the same device
> > > page.
> > >
> > > Cc: Philip Yang <Philip.Yang@xxxxxxx>
> > > Cc: Felix Kuehling <felix.kuehling@xxxxxxx>
> > > Cc: Christian König <christian.koenig@xxxxxxx>
> > > Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
> > > Suggested-by: Simona Vetter <simona.vetter@xxxxxxxx>
> > > Signed-off-by: Matthew Brost <matthew.brost@xxxxxxxxx>
> > > ---
> > >  mm/memory.c         | 13 ++++++---
> > >  mm/migrate_device.c | 69 ++++++++++++++++++++++++++++++---------------
> > >  2 files changed, 56 insertions(+), 26 deletions(-)
> > >
> > > diff --git a/mm/memory.c b/mm/memory.c
> > > index 2366578015ad..b72bde782611 100644
> > > --- a/mm/memory.c
> > > +++ b/mm/memory.c
> > > @@ -4252,10 +4252,15 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> > >  			 * Get a page reference while we know the page can't be
> > >  			 * freed.
> > >  			 */
> > > -			get_page(vmf->page);
> > > -			pte_unmap_unlock(vmf->pte, vmf->ptl);
> > > -			ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
> > > -			put_page(vmf->page);
> > > +			if (trylock_page(vmf->page)) {
> > > +				get_page(vmf->page);
> > > +				pte_unmap_unlock(vmf->pte, vmf->ptl);
> > > +				ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
> > > +				put_page(vmf->page);
> > > +				unlock_page(vmf->page);
> >
> > Isn't the order wrong here? In the common case put_page() will have just
> > dropped the last reference on the page and freed it, so the unlock_page()
> > needs to happen first.
>
> Yes, this appears wrong. I haven't seen it show up as a problem, but
> certainly the put should be done after dropping the lock.
>
> > > +			} else {
> > > +				pte_unmap_unlock(vmf->pte, vmf->ptl);
> > > +			}
> > >  	} else if (is_hwpoison_entry(entry)) {
> > >  		ret = VM_FAULT_HWPOISON;
> > >  	} else if (is_pte_marker_entry(entry)) {
> > > diff --git a/mm/migrate_device.c b/mm/migrate_device.c
> > > index f163c2131022..2477d39f57be 100644
> > > --- a/mm/migrate_device.c
> > > +++ b/mm/migrate_device.c
> > > @@ -60,6 +60,8 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
> > >  			   struct mm_walk *walk)
> > >  {
> > >  	struct migrate_vma *migrate = walk->private;
> > > +	struct folio *fault_folio = migrate->fault_page ?
> > > +		page_folio(migrate->fault_page) : NULL;
> > >  	struct vm_area_struct *vma = walk->vma;
> > >  	struct mm_struct *mm = vma->vm_mm;
> > >  	unsigned long addr = start, unmapped = 0;
> > > @@ -88,11 +90,13 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
> > >
> > >  		folio_get(folio);
> > >  		spin_unlock(ptl);
> > > -		if (unlikely(!folio_trylock(folio)))
> > > +		if (unlikely(fault_folio != folio &&
> >
> > We don't currently support large ZONE_DEVICE pages, so we should never
> > get here. I think a WARN_ON_ONCE(fault_folio == folio) and bail would be
> > better.
>
> Sure, will fix.
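To be concrete, for the collect path I'm thinking of roughly the
following (untested sketch; the reference and skip handling are
unchanged from the existing code, only the WARN and early bail are new):

		folio_get(folio);
		spin_unlock(ptl);
		/*
		 * Large ZONE_DEVICE folios aren't supported yet, so the
		 * already-locked fault page should never reach the split
		 * path. Warn and bail if it somehow does.
		 */
		if (WARN_ON_ONCE(folio == fault_folio))
			return migrate_vma_collect_skip(start, end, walk);
		if (unlikely(!folio_trylock(folio)))
			return migrate_vma_collect_skip(start, end, walk);
		ret = split_folio(folio);
		folio_unlock(folio);
		folio_put(folio);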
> > > +			     !folio_trylock(folio)))
> > >  			return migrate_vma_collect_skip(start, end,
> > >  							walk);
> > >  		ret = split_folio(folio);
> > > -		folio_unlock(folio);
> > > +		if (fault_folio != folio)
> > > +			folio_unlock(folio);
> > >  		folio_put(folio);
> > >  		if (ret)
> > >  			return migrate_vma_collect_skip(start, end,
> > > @@ -192,7 +196,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
> > >  		 * optimisation to avoid walking the rmap later with
> > >  		 * try_to_migrate().
> > >  		 */
> > > -		if (folio_trylock(folio)) {
> > > +		if (fault_folio == folio || folio_trylock(folio)) {
> > >  			bool anon_exclusive;
> > >  			pte_t swp_pte;
> > >
> > > @@ -204,7 +208,8 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
> > >
> > >  			if (folio_try_share_anon_rmap_pte(folio, page)) {
> > >  				set_pte_at(mm, addr, ptep, pte);
> > > -				folio_unlock(folio);
> > > +				if (fault_folio != folio)
> > > +					folio_unlock(folio);
> > >  				folio_put(folio);
> > >  				mpfn = 0;
> > >  				goto next;
> > > @@ -363,6 +368,8 @@ static unsigned long migrate_device_unmap(unsigned long *src_pfns,
> > >  					  unsigned long npages,
> > >  					  struct page *fault_page)
> > >  {
> > > +	struct folio *fault_folio = fault_page ?
> > > +		page_folio(fault_page) : NULL;
> > >  	unsigned long i, restore = 0;
> > >  	bool allow_drain = true;
> > >  	unsigned long unmapped = 0;
> > > @@ -427,7 +434,8 @@ static unsigned long migrate_device_unmap(unsigned long *src_pfns,
> > >  			remove_migration_ptes(folio, folio, 0);
> > >
> > >  			src_pfns[i] = 0;
> > > -			folio_unlock(folio);
> > > +			if (fault_folio != folio)
> > > +				folio_unlock(folio);
> > >  			folio_put(folio);
> > >  			restore--;
> > >  		}
> > > @@ -536,6 +544,8 @@ int migrate_vma_setup(struct migrate_vma *args)
> > >  		return -EINVAL;
> > >  	if (args->fault_page && !is_device_private_page(args->fault_page))
> > >  		return -EINVAL;
> > > +	if (args->fault_page && !PageLocked(args->fault_page))
> > > +		return -EINVAL;
> > >
> > >  	memset(args->src, 0, sizeof(*args->src) * nr_pages);
> > >  	args->cpages = 0;
> > > @@ -799,19 +809,13 @@ void migrate_vma_pages(struct migrate_vma *migrate)
> > >  }
> > >  EXPORT_SYMBOL(migrate_vma_pages);
> > >
> > > -/*
> > > - * migrate_device_finalize() - complete page migration
> > > - * @src_pfns: src_pfns returned from migrate_device_range()
> > > - * @dst_pfns: array of pfns allocated by the driver to migrate memory to
> > > - * @npages: number of pages in the range
> > > - *
> > > - * Completes migration of the page by removing special migration entries.
> > > - * Drivers must ensure copying of page data is complete and visible to the CPU
> > > - * before calling this.
> > > - */
> > > -void migrate_device_finalize(unsigned long *src_pfns,
> > > -			     unsigned long *dst_pfns, unsigned long npages)
> > > +static void __migrate_device_finalize(unsigned long *src_pfns,
> > > +				      unsigned long *dst_pfns,
> > > +				      unsigned long npages,
> > > +				      struct page *fault_page)
> > >  {
> > > +	struct folio *fault_folio = fault_page ?
> > > +		page_folio(fault_page) : NULL;
> > >  	unsigned long i;
> > >
> > >  	for (i = 0; i < npages; i++) {
> > > @@ -824,7 +828,8 @@ void migrate_device_finalize(unsigned long *src_pfns,
> > >
> > >  		if (!page) {
> > >  			if (dst) {
> > > -				folio_unlock(dst);
> > > +				if (fault_folio != dst)
> > > +					folio_unlock(dst);
> >
> > How could the destination page be the faulting page? I think we can drop
> > this check.
>
> Yea, will drop.

Actually landing on a WARN_ON_ONCE(fault_folio == dst) to catch potential
bugs. Will include that in my next rev.
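Concretely, the first of the dst unlock paths in
__migrate_device_finalize() would become something like this (untested;
the other two dst sites quoted below would get the same treatment):

		if (!page) {
			if (dst) {
				/*
				 * fault_page is always a source page, so it
				 * should never show up as a migration
				 * destination; warn if it somehow does.
				 */
				WARN_ON_ONCE(fault_folio == dst);
				folio_unlock(dst);
				folio_put(dst);
			}
			continue;
		}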
If you disagree, let me know and we can discuss ahead of the next rev.

Matt

> > >  				folio_put(dst);
> > >  			}
> > >  			continue;
> > >  		}
> > >
> > > @@ -834,14 +839,16 @@ void migrate_device_finalize(unsigned long *src_pfns,
> > >
> > >  		if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE) || !dst) {
> > >  			if (dst) {
> > > -				folio_unlock(dst);
> > > +				if (fault_folio != dst)
> > > +					folio_unlock(dst);
> >
> > Likewise.
>
> Same here.
>
> > >  				folio_put(dst);
> > >  			}
> > >  			dst = src;
> > >  		}
> > >
> > >  		remove_migration_ptes(src, dst, 0);
> > > -		folio_unlock(src);
> > > +		if (fault_folio != src)
> > > +			folio_unlock(src);
> > >
> > >  		if (folio_is_zone_device(src))
> > >  			folio_put(src);
> > > @@ -849,7 +856,8 @@ void migrate_device_finalize(unsigned long *src_pfns,
> > >  			folio_putback_lru(src);
> > >
> > >  		if (dst != src) {
> > > -			folio_unlock(dst);
> > > +			if (fault_folio != dst)
> > > +				folio_unlock(dst);
> >
> > This one also seems unnecessary.
>
> Same here.
>
> Thanks - Matt
>
> > - Alistair
> >
> > >  			if (folio_is_zone_device(dst))
> > >  				folio_put(dst);
> > >  			else
> > > @@ -857,6 +865,22 @@ void migrate_device_finalize(unsigned long *src_pfns,
> > >  		}
> > >  	}
> > >  }
> > > +
> > > +/*
> > > + * migrate_device_finalize() - complete page migration
> > > + * @src_pfns: src_pfns returned from migrate_device_range()
> > > + * @dst_pfns: array of pfns allocated by the driver to migrate memory to
> > > + * @npages: number of pages in the range
> > > + *
> > > + * Completes migration of the page by removing special migration entries.
> > > + * Drivers must ensure copying of page data is complete and visible to the CPU
> > > + * before calling this.
> > > + */
> > > +void migrate_device_finalize(unsigned long *src_pfns,
> > > +			     unsigned long *dst_pfns, unsigned long npages)
> > > +{
> > > +	return __migrate_device_finalize(src_pfns, dst_pfns, npages, NULL);
> > > +}
> > >  EXPORT_SYMBOL(migrate_device_finalize);
> > >
> > >  /**
> > > @@ -872,7 +896,8 @@ EXPORT_SYMBOL(migrate_device_finalize);
> > >   */
> > >  void migrate_vma_finalize(struct migrate_vma *migrate)
> > >  {
> > > -	migrate_device_finalize(migrate->src, migrate->dst, migrate->npages);
> > > +	__migrate_device_finalize(migrate->src, migrate->dst, migrate->npages,
> > > +				  migrate->fault_page);
> > >  }
> > >  EXPORT_SYMBOL(migrate_vma_finalize);
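For completeness, the reordered do_swap_page() hunk per Alistair's first
comment above will look roughly like this in the next rev (untested; the
trylock failure path is unchanged):

			if (trylock_page(vmf->page)) {
				get_page(vmf->page);
				pte_unmap_unlock(vmf->pte, vmf->ptl);
				ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
				/* Drop the page lock before the final put ... */
				unlock_page(vmf->page);
				/* ... since put_page() may free the page. */
				put_page(vmf->page);
			} else {
				pte_unmap_unlock(vmf->pte, vmf->ptl);
			}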