The patch titled
     Subject: mm/filemap: inline __wait_on_page_locked_async into caller
has been removed from the -mm tree.  Its filename was
     mm-filemap-inline-__wait_on_page_locked_async-into-caller.patch

This patch was dropped because it was merged into mainline or a subsystem tree

------------------------------------------------------
From: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx>
Subject: mm/filemap: inline __wait_on_page_locked_async into caller

The previous patch removed wait_on_page_locked_async(), so inline
__wait_on_page_locked_async into __lock_page_async().

Link: https://lkml.kernel.org/r/20210122160140.223228-8-willy@xxxxxxxxxxxxx
Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Reviewed-by: Kent Overstreet <kent.overstreet@xxxxxxxxx>
Reviewed-by: Christoph Hellwig <hch@xxxxxx>
Cc: Miaohe Lin <linmiaohe@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/filemap.c |   53 ++++++++++++++++++++-----------------------------
 1 file changed, 22 insertions(+), 31 deletions(-)

--- a/mm/filemap.c~mm-filemap-inline-__wait_on_page_locked_async-into-caller
+++ a/mm/filemap.c
@@ -1343,36 +1343,6 @@ int wait_on_page_bit_killable(struct pag
 }
 EXPORT_SYMBOL(wait_on_page_bit_killable);
 
-static int __wait_on_page_locked_async(struct page *page,
-				       struct wait_page_queue *wait, bool set)
-{
-	struct wait_queue_head *q = page_waitqueue(page);
-	int ret = 0;
-
-	wait->page = page;
-	wait->bit_nr = PG_locked;
-
-	spin_lock_irq(&q->lock);
-	__add_wait_queue_entry_tail(q, &wait->wait);
-	SetPageWaiters(page);
-	if (set)
-		ret = !trylock_page(page);
-	else
-		ret = PageLocked(page);
-	/*
-	 * If we were successful now, we know we're still on the
-	 * waitqueue as we're still under the lock. This means it's
-	 * safe to remove and return success, we know the callback
-	 * isn't going to trigger.
-	 */
-	if (!ret)
-		__remove_wait_queue(q, &wait->wait);
-	else
-		ret = -EIOCBQUEUED;
-	spin_unlock_irq(&q->lock);
-	return ret;
-}
-
 /**
  * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked
  * @page: The page to wait for.
@@ -1548,7 +1518,28 @@ EXPORT_SYMBOL_GPL(__lock_page_killable);
 
 int __lock_page_async(struct page *page, struct wait_page_queue *wait)
 {
-	return __wait_on_page_locked_async(page, wait, true);
+	struct wait_queue_head *q = page_waitqueue(page);
+	int ret = 0;
+
+	wait->page = page;
+	wait->bit_nr = PG_locked;
+
+	spin_lock_irq(&q->lock);
+	__add_wait_queue_entry_tail(q, &wait->wait);
+	SetPageWaiters(page);
+	ret = !trylock_page(page);
+	/*
+	 * If we were successful now, we know we're still on the
+	 * waitqueue as we're still under the lock. This means it's
+	 * safe to remove and return success, we know the callback
+	 * isn't going to trigger.
+	 */
+	if (!ret)
+		__remove_wait_queue(q, &wait->wait);
+	else
+		ret = -EIOCBQUEUED;
+	spin_unlock_irq(&q->lock);
+	return ret;
 }
 
 /*
_

Patches currently in -mm which might be from willy@xxxxxxxxxxxxx are

mm-make-pagecache-tagged-lookups-return-only-head-pages.patch
mm-shmem-use-pagevec_lookup-in-shmem_unlock_mapping.patch
mm-swap-optimise-get_shadow_from_swap_cache.patch
mm-add-fgp_entry.patch
mm-filemap-rename-find_get_entry-to-mapping_get_entry.patch
mm-filemap-add-helper-for-finding-pages.patch
mm-filemap-add-helper-for-finding-pages-fix.patch
mm-filemap-add-mapping_seek_hole_data.patch
mm-filemap-add-mapping_seek_hole_data-fix.patch
iomap-use-mapping_seek_hole_data.patch
mm-add-and-use-find_lock_entries.patch
mm-add-and-use-find_lock_entries-fix.patch
mm-add-an-end-parameter-to-find_get_entries.patch
mm-add-an-end-parameter-to-pagevec_lookup_entries.patch
mm-remove-nr_entries-parameter-from-pagevec_lookup_entries.patch
mm-pass-pvec-directly-to-find_get_entries.patch
mm-remove-pagevec_lookup_entries.patch
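
(For context, not part of the patch above: the return convention the inlined
code preserves -- 0 if the page lock was taken, -EIOCBQUEUED if the waiter was
queued -- is consumed by IOCB_WAITQ-style callers.  A minimal sketch of such a
caller follows; my_wake_fn and my_lock_page are hypothetical names, loosely
modelled on the io_uring async buffered read path, not in-tree functions.)

static int my_wake_fn(struct wait_queue_entry *wait, unsigned mode,
		      int sync, void *key)
{
	struct wait_page_queue *wpq = container_of(wait,
					struct wait_page_queue, wait);
	struct wait_page_key *k = key;

	/* Ignore wakeups for other pages/bits hashed to this waitqueue. */
	if (!wake_page_match(wpq, k))
		return 0;

	/* Dequeue ourselves, then kick whatever work retries the read. */
	list_del_init(&wait->entry);
	return 1;
}

static int my_lock_page(struct page *page, struct wait_page_queue *wpq)
{
	init_waitqueue_func_entry(&wpq->wait, my_wake_fn);

	if (trylock_page(page))
		return 0;	/* locked immediately, proceed inline */

	/*
	 * 0: __lock_page_async() won a late race and holds the lock.
	 * -EIOCBQUEUED: we are on the waitqueue; my_wake_fn() will run
	 * once the page is unlocked, and the I/O is retried from there.
	 */
	return __lock_page_async(page, wpq);
}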