Change folio_lock_or_retry to accept a vm_fault struct and to return
a vm_fault_t directly, renaming it to folio_lock_fault to match. The
new return value will be used later to return additional information
about the state of the mmap_lock upon return from this function.

Suggested-by: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Signed-off-by: Suren Baghdasaryan <surenb@xxxxxxxxxx>
---
 include/linux/pagemap.h | 13 ++++++-------
 mm/filemap.c            | 29 +++++++++++++++--------------
 mm/memory.c             | 14 ++++++--------
 3 files changed, 27 insertions(+), 29 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index a56308a9d1a4..0bc206c6f62c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -896,8 +896,7 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
 
 void __folio_lock(struct folio *folio);
 int __folio_lock_killable(struct folio *folio);
-bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
-			   unsigned int flags);
+vm_fault_t __folio_lock_fault(struct folio *folio, struct vm_fault *vmf);
 void unlock_page(struct page *page);
 void folio_unlock(struct folio *folio);
 
@@ -995,17 +994,17 @@ static inline int folio_lock_killable(struct folio *folio)
 }
 
 /*
- * folio_lock_or_retry - Lock the folio, unless this would block and the
+ * folio_lock_fault - Lock the folio, unless this would block and the
  * caller indicated that it can handle a retry.
  *
  * Return value and mmap_lock implications depend on flags; see
- * __folio_lock_or_retry().
+ * __folio_lock_fault().
  */
-static inline bool folio_lock_or_retry(struct folio *folio,
-				       struct mm_struct *mm, unsigned int flags)
+static inline vm_fault_t folio_lock_fault(struct folio *folio,
+					  struct vm_fault *vmf)
 {
 	might_sleep();
-	return folio_trylock(folio) || __folio_lock_or_retry(folio, mm, flags);
+	return folio_trylock(folio) ? 0 : __folio_lock_fault(folio, vmf);
 }
 
 /*
diff --git a/mm/filemap.c b/mm/filemap.c
index 00f01d8ead47..87b335a93530 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1701,46 +1701,47 @@ static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
 
 /*
  * Return values:
- * true - folio is locked; mmap_lock is still held.
- * false - folio is not locked.
+ * 0 - folio is locked.
+ * VM_FAULT_RETRY - folio is not locked.
  *     mmap_lock has been released (mmap_read_unlock(), unless flags had both
  *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
  *     which case mmap_lock is still held.
  *
- * If neither ALLOW_RETRY nor KILLABLE are set, will always return true
+ * If neither ALLOW_RETRY nor KILLABLE are set, will always return 0
  * with the folio locked and the mmap_lock unperturbed.
  */
-bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
-			   unsigned int flags)
+vm_fault_t __folio_lock_fault(struct folio *folio, struct vm_fault *vmf)
 {
-	if (fault_flag_allow_retry_first(flags)) {
+	struct mm_struct *mm = vmf->vma->vm_mm;
+
+	if (fault_flag_allow_retry_first(vmf->flags)) {
 		/*
 		 * CAUTION! In this case, mmap_lock is not released
-		 * even though return 0.
+		 * even though return VM_FAULT_RETRY.
 		 */
-		if (flags & FAULT_FLAG_RETRY_NOWAIT)
-			return false;
+		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
+			return VM_FAULT_RETRY;
 
 		mmap_read_unlock(mm);
-		if (flags & FAULT_FLAG_KILLABLE)
+		if (vmf->flags & FAULT_FLAG_KILLABLE)
 			folio_wait_locked_killable(folio);
 		else
 			folio_wait_locked(folio);
-		return false;
+		return VM_FAULT_RETRY;
 	}
-	if (flags & FAULT_FLAG_KILLABLE) {
+	if (vmf->flags & FAULT_FLAG_KILLABLE) {
 		bool ret;
 
 		ret = __folio_lock_killable(folio);
 		if (ret) {
 			mmap_read_unlock(mm);
-			return false;
+			return VM_FAULT_RETRY;
 		}
 	} else {
 		__folio_lock(folio);
 	}
 
-	return true;
+	return 0;
 }
 
 /**
diff --git a/mm/memory.c b/mm/memory.c
index 9011ad63c41b..3c2acafcd7b6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3568,6 +3568,7 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
 	struct folio *folio = page_folio(vmf->page);
 	struct vm_area_struct *vma = vmf->vma;
 	struct mmu_notifier_range range;
+	vm_fault_t ret;
 
 	/*
 	 * We need a reference to lock the folio because we don't hold
@@ -3580,9 +3581,10 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
 	if (!folio_try_get(folio))
 		return 0;
 
-	if (!folio_lock_or_retry(folio, vma->vm_mm, vmf->flags)) {
+	ret = folio_lock_fault(folio, vmf);
+	if (ret) {
 		folio_put(folio);
-		return VM_FAULT_RETRY;
+		return ret;
 	}
 	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
 				vma->vm_mm, vmf->address & PAGE_MASK,
@@ -3704,7 +3706,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	bool exclusive = false;
 	swp_entry_t entry;
 	pte_t pte;
-	int locked;
 	vm_fault_t ret = 0;
 	void *shadow = NULL;
 
@@ -3825,12 +3826,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		goto out_release;
 	}
 
-	locked = folio_lock_or_retry(folio, vma->vm_mm, vmf->flags);
-
-	if (!locked) {
-		ret |= VM_FAULT_RETRY;
+	ret |= folio_lock_fault(folio, vmf);
+	if (ret & VM_FAULT_RETRY)
 		goto out_release;
-	}
 
 	if (swapcache) {
 		/*
-- 
2.41.0.178.g377b9f9a00-goog
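
A caller-side sketch of the new convention may help review. The helper
below is hypothetical (written for this posting, not part of the
patch); it only illustrates how a fault path consumes
folio_lock_fault():

	/*
	 * Hypothetical fault-path helper: folio_lock_fault() returns a
	 * vm_fault_t, so the result can be returned as-is, or OR-ed
	 * into an accumulated fault result as do_swap_page() now does,
	 * instead of translating a bool into VM_FAULT_RETRY at every
	 * call site.
	 */
	static vm_fault_t lock_folio_for_fault(struct folio *folio,
					       struct vm_fault *vmf)
	{
		vm_fault_t ret;

		ret = folio_lock_fault(folio, vmf);
		if (ret)
			/* Not locked; mmap_lock may have been dropped. */
			return ret;

		/* Folio is locked and mmap_lock is still held. */
		return 0;
	}

Because the return type is vm_fault_t, later patches can fold extra
bits describing the state of the mmap_lock into the same return value
without touching the call sites again.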