Make it plain that this takes the head page (which before this point was
just an assumption, but is now enforced by the compiler).

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 mm/page_io.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/mm/page_io.c b/mm/page_io.c
index 864a558b7b68..0bc52e16452d 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -454,15 +454,15 @@ static void swap_readpage_fs(struct folio *folio, struct swap_iocb **plug)
 	*plug = sio;
 }
 
-static void swap_readpage_bdev_sync(struct page *page,
+static void swap_readpage_bdev_sync(struct folio *folio,
 		struct swap_info_struct *sis)
 {
 	struct bio_vec bv;
 	struct bio bio;
 
 	bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
-	bio.bi_iter.bi_sector = swap_page_sector(page);
-	__bio_add_page(&bio, page, thp_size(page), 0);
+	bio.bi_iter.bi_sector = swap_page_sector(&folio->page);
+	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
 	/*
 	 * Keep this task valid during swap readpage because the oom killer may
 	 * attempt to access it in the page fault retry time check.
@@ -516,7 +516,7 @@ void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
 	} else if (data_race(sis->flags & SWP_FS_OPS)) {
 		swap_readpage_fs(folio, plug);
 	} else if (synchronous || (sis->flags & SWP_SYNCHRONOUS_IO)) {
-		swap_readpage_bdev_sync(page, sis);
+		swap_readpage_bdev_sync(folio, sis);
 	} else {
 		swap_readpage_bdev_async(page, sis);
 	}
-- 
2.42.0
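
To illustrate the "enforced by the compiler" point in the commit message, here is a
minimal user-space C sketch, not kernel code: the struct definitions and the
readpage_bdev_sync() name are simplified stand-ins, not the real kernel APIs.
Because only a head page has a struct folio, a caller holding an arbitrary
struct page can no longer be passed into the sync readpage path by accident.

	/* Minimal sketch of the type-safety argument: a function taking
	 * struct folio * cannot be handed an arbitrary page, so the
	 * head-page assumption becomes a compile-time guarantee.
	 * Types are simplified stand-ins, not the kernel definitions. */
	#include <stdio.h>

	struct page { unsigned long index; };   /* any page: head or tail */
	struct folio { struct page page; };     /* by construction, a head page */

	static void readpage_bdev_sync(struct folio *folio)
	{
		/* Head-page semantics are safe to assume here. */
		printf("sync read of head page at index %lu\n",
		       folio->page.index);
	}

	int main(void)
	{
		struct folio f = { .page = { .index = 0 } };
		struct page tail = { .index = 1 };

		readpage_bdev_sync(&f);
		/* readpage_bdev_sync(&tail); -- rejected at compile time */
		(void)tail;
		return 0;
	}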