Yosry Ahmed <yosryahmed@xxxxxxxxxx> writes: > swap_read_folio() reads the folio synchronously if synchronous is passed > as true or if SWP_SYNCHRONOUS_IO is set in swap_info_struct. The only > caller that passes synchronous=true is in do_swap_page() in the > SWP_SYNCHRONOUS_IO case. > > Hence, the argument is redundant, it is only set to true when the swap > read would have been synchronous anyway. Remove it. > > Signed-off-by: Yosry Ahmed <yosryahmed@xxxxxxxxxx> LGTM, Thanks! Reviewed-by: "Huang, Ying" <ying.huang@xxxxxxxxx> And, there's some history information in commit b243dcbf2f13 ("swap: remove remnants of polling from read_swap_cache_async"). " Commit [1] introduced IO polling support during swapin to reduce swap read latency for block devices that can be polled. However later commit [2] removed polling support. Therefore it seems safe to remove do_poll parameter in read_swap_cache_async and always call swap_readpage with synchronous=false waiting for IO completion in folio_lock_or_retry. [1] commit 23955622ff8d ("swap: add block io poll in swapin path") [2] commit 9650b453a3d4 ("block: ignore RWF_HIPRI hint for sync dio") " IMO, it will help people to understand the code change to add some history information as above, or refer to the commit. 
-- Best Regards, Huang, Ying > --- > mm/memory.c | 2 +- > mm/page_io.c | 6 +++--- > mm/swap.h | 6 ++---- > mm/swap_state.c | 10 +++++----- > 4 files changed, 11 insertions(+), 13 deletions(-) > > diff --git a/mm/memory.c b/mm/memory.c > index db91304882312..2b3ef08e8bb7d 100644 > --- a/mm/memory.c > +++ b/mm/memory.c > @@ -4113,7 +4113,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) > > /* To provide entry to swap_read_folio() */ > folio->swap = entry; > - swap_read_folio(folio, true, NULL); > + swap_read_folio(folio, NULL); > folio->private = NULL; > } > } else { > diff --git a/mm/page_io.c b/mm/page_io.c > index 41e8d738c6d28..f1a9cfab6e748 100644 > --- a/mm/page_io.c > +++ b/mm/page_io.c > @@ -493,10 +493,10 @@ static void swap_read_folio_bdev_async(struct folio *folio, > submit_bio(bio); > } > > -void swap_read_folio(struct folio *folio, bool synchronous, > - struct swap_iocb **plug) > +void swap_read_folio(struct folio *folio, struct swap_iocb **plug) > { > struct swap_info_struct *sis = swp_swap_info(folio->swap); > + bool synchronous = sis->flags & SWP_SYNCHRONOUS_IO; > bool workingset = folio_test_workingset(folio); > unsigned long pflags; > bool in_thrashing; > @@ -521,7 +521,7 @@ void swap_read_folio(struct folio *folio, bool synchronous, > folio_unlock(folio); > } else if (data_race(sis->flags & SWP_FS_OPS)) { > swap_read_folio_fs(folio, plug); > - } else if (synchronous || (sis->flags & SWP_SYNCHRONOUS_IO)) { > + } else if (synchronous) { > swap_read_folio_bdev_sync(folio, sis); > } else { > swap_read_folio_bdev_async(folio, sis); > diff --git a/mm/swap.h b/mm/swap.h > index 2c0e96272d498..baa1fa946b347 100644 > --- a/mm/swap.h > +++ b/mm/swap.h > @@ -11,8 +11,7 @@ struct mempolicy; > /* linux/mm/page_io.c */ > int sio_pool_init(void); > struct swap_iocb; > -void swap_read_folio(struct folio *folio, bool do_poll, > - struct swap_iocb **plug); > +void swap_read_folio(struct folio *folio, struct swap_iocb **plug); > void __swap_read_unplug(struct 
swap_iocb *plug); > static inline void swap_read_unplug(struct swap_iocb *plug) > { > @@ -83,8 +82,7 @@ static inline unsigned int folio_swap_flags(struct folio *folio) > } > #else /* CONFIG_SWAP */ > struct swap_iocb; > -static inline void swap_read_folio(struct folio *folio, bool do_poll, > - struct swap_iocb **plug) > +static inline void swap_read_folio(struct folio *folio, struct swap_iocb **plug) > { > } > static inline void swap_write_unplug(struct swap_iocb *sio) > diff --git a/mm/swap_state.c b/mm/swap_state.c > index 0803eedeabe3d..994723cef8217 100644 > --- a/mm/swap_state.c > +++ b/mm/swap_state.c > @@ -567,7 +567,7 @@ struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, > mpol_cond_put(mpol); > > if (page_allocated) > - swap_read_folio(folio, false, plug); > + swap_read_folio(folio, plug); > return folio; > } > > @@ -684,7 +684,7 @@ struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, > if (!folio) > continue; > if (page_allocated) { > - swap_read_folio(folio, false, &splug); > + swap_read_folio(folio, &splug); > if (offset != entry_offset) { > folio_set_readahead(folio); > count_vm_event(SWAP_RA); > @@ -701,7 +701,7 @@ struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, > &page_allocated, false); > if (unlikely(page_allocated)) { > zswap_folio_swapin(folio); > - swap_read_folio(folio, false, NULL); > + swap_read_folio(folio, NULL); > } > return folio; > } > @@ -834,7 +834,7 @@ static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask, > if (!folio) > continue; > if (page_allocated) { > - swap_read_folio(folio, false, &splug); > + swap_read_folio(folio, &splug); > if (addr != vmf->address) { > folio_set_readahead(folio); > count_vm_event(SWAP_RA); > @@ -853,7 +853,7 @@ static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask, > &page_allocated, false); > if (unlikely(page_allocated)) { > zswap_folio_swapin(folio); > - swap_read_folio(folio, false, NULL); 
> + swap_read_folio(folio, NULL); > } > return folio; > }