Move the checks for IOCB_NOWAIT and IOCB_WAITQ from the only caller
into generic_file_buffered_read_pagenotuptodate, which simplifies the
error unwinding.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 mm/filemap.c | 28 +++++++++++-----------------
 1 file changed, 11 insertions(+), 17 deletions(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index c717cfe35cc72a..bae5b905aa7bdc 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2219,19 +2219,22 @@ static int filemap_readpage(struct kiocb *iocb, struct page *page)
 static int generic_file_buffered_read_pagenotuptodate(struct kiocb *iocb,
 		struct iov_iter *iter, struct page *page, loff_t pos,
-		loff_t count)
+		loff_t count, bool first)
 {
 	struct address_space *mapping = iocb->ki_filp->f_mapping;
-	int error;
+	int error = -EAGAIN;
+
+	if (iocb->ki_flags & IOCB_NOWAIT)
+		goto put_page;
 
 	/*
-	 * See comment in do_read_cache_page on why
-	 * wait_on_page_locked is used to avoid unnecessarily
-	 * serialisations and why it's safe.
+	 * See comment in do_read_cache_page on why wait_on_page_locked is used
+	 * to avoid unnecessarily serialisations and why it's safe.
 	 */
 	if (iocb->ki_flags & IOCB_WAITQ) {
-		error = wait_on_page_locked_async(page,
-						iocb->ki_waitq);
+		if (!first)
+			goto put_page;
+		error = wait_on_page_locked_async(page, iocb->ki_waitq);
 	} else {
 		error = wait_on_page_locked_killable(page);
 	}
 
@@ -2376,17 +2379,8 @@ static int generic_file_buffered_read_get_pages(struct kiocb *iocb,
 		}
 
 		if (!PageUptodate(page)) {
-			if ((iocb->ki_flags & IOCB_NOWAIT) ||
-			    ((iocb->ki_flags & IOCB_WAITQ) && i)) {
-				for (j = i; j < nr_got; j++)
-					put_page(pages[j]);
-				nr_got = i;
-				err = -EAGAIN;
-				break;
-			}
-
 			err = generic_file_buffered_read_pagenotuptodate(iocb,
-					iter, page, pg_pos, pg_count);
+					iter, page, pg_pos, pg_count, i == 0);
 			if (err) {
 				if (err == AOP_TRUNCATED_PAGE)
 					err = 0;
-- 
2.28.0