The readpage operation can block in many (most?) filesystems, so we
should punt to a work queue instead of calling it.  This was the last
caller of lock_page_for_iocb(), so remove it.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Reviewed-by: Kent Overstreet <kent.overstreet@xxxxxxxxx>
Reviewed-by: Christoph Hellwig <hch@xxxxxx>
---
 mm/filemap.c | 14 ++------------
 1 file changed, 2 insertions(+), 12 deletions(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index 076a97dcacf1e..e904e53ae90d9 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2144,16 +2144,6 @@ static void shrink_readahead_size_eio(struct file_ra_state *ra)
 	ra->ra_pages /= 4;
 }
 
-static int lock_page_for_iocb(struct kiocb *iocb, struct page *page)
-{
-	if (iocb->ki_flags & IOCB_WAITQ)
-		return lock_page_async(page, iocb->ki_waitq);
-	else if (iocb->ki_flags & IOCB_NOWAIT)
-		return trylock_page(page) ? 0 : -EAGAIN;
-	else
-		return lock_page_killable(page);
-}
-
 /*
  * filemap_get_read_batch - Get a batch of pages for read
  *
@@ -2205,7 +2195,7 @@ static struct page *filemap_read_page(struct kiocb *iocb, struct file *filp,
 	struct file_ra_state *ra = &filp->f_ra;
 	int error;
 
-	if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT)) {
+	if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ)) {
 		unlock_page(page);
 		put_page(page);
 		return ERR_PTR(-EAGAIN);
@@ -2226,7 +2216,7 @@ static struct page *filemap_read_page(struct kiocb *iocb, struct file *filp,
 	}
 
 	if (!PageUptodate(page)) {
-		error = lock_page_for_iocb(iocb, page);
+		error = lock_page_killable(page);
 		if (unlikely(error)) {
 			put_page(page);
 			return ERR_PTR(error);
-- 
2.29.2
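
For readers following the hunks, the net effect on filemap_read_page()
condenses to the flow below.  This is an illustrative sketch reconstructed
from the diff context above, not the literal post-patch source: the
signature beyond the visible context line is assumed, and the elided
parts (marked "...") stand for the unchanged ->readpage invocation and
its error handling.

static struct page *filemap_read_page(struct kiocb *iocb, struct file *filp,
		struct address_space *mapping, struct page *page)
{
	int error;

	/*
	 * ->readpage may sleep.  Callers that must not block (IOCB_NOIO,
	 * IOCB_NOWAIT) or that requested waitqueue-based async locking
	 * (IOCB_WAITQ) now all bail out with -EAGAIN before ->readpage
	 * is invoked; io_uring then punts the read to a worker thread
	 * and retries from a context where sleeping is acceptable.
	 */
	if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ)) {
		unlock_page(page);
		put_page(page);
		return ERR_PTR(-EAGAIN);
	}

	/* ... start the read via mapping->a_ops->readpage(filp, page) ... */

	if (!PageUptodate(page)) {
		/*
		 * Only synchronous callers get this far, so a plain
		 * killable lock suffices and the three-way dispatch that
		 * lock_page_for_iocb() provided is no longer needed.
		 */
		error = lock_page_killable(page);
		if (unlikely(error)) {
			put_page(page);
			return ERR_PTR(error);
		}
		/* ... re-check PageUptodate(); on error, call
		 * shrink_readahead_size_eio() ... */
	}

	return page;
}

Moving the bail-out ahead of ->readpage trades a possible extra
worker-thread round trip for never sleeping in ->readpage on the
IOCB_WAITQ path, which is what the commit message means by punting to a
work queue.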