On 09/04/2024 09:26, Barry Song wrote:
> From: Chuanhua Han <hanchuanhua@xxxxxxxx>
>
> While swapping in a large folio, we need to free swaps related to the whole
> folio. To avoid frequently acquiring and releasing swap locks, it is better
> to introduce an API for batched free.
>
> Signed-off-by: Chuanhua Han <hanchuanhua@xxxxxxxx>
> Co-developed-by: Barry Song <v-songbaohua@xxxxxxxx>
> Signed-off-by: Barry Song <v-songbaohua@xxxxxxxx>

Couple of nits below; feel free to ignore. I've appended a couple of rough
sketches at the bottom of this mail to illustrate them, plus a hypothetical
example of the intended call site.

Reviewed-by: Ryan Roberts <ryan.roberts@xxxxxxx>

> ---
>  include/linux/swap.h |  5 +++++
>  mm/swapfile.c        | 51 ++++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 56 insertions(+)
>
> diff --git a/include/linux/swap.h b/include/linux/swap.h
> index 11c53692f65f..b7a107e983b8 100644
> --- a/include/linux/swap.h
> +++ b/include/linux/swap.h
> @@ -483,6 +483,7 @@ extern void swap_shmem_alloc(swp_entry_t);
>  extern int swap_duplicate(swp_entry_t);
>  extern int swapcache_prepare(swp_entry_t);
>  extern void swap_free(swp_entry_t);
> +extern void swap_free_nr(swp_entry_t entry, int nr_pages);
>  extern void swapcache_free_entries(swp_entry_t *entries, int n);
>  extern void free_swap_and_cache_nr(swp_entry_t entry, int nr);
>  int swap_type_of(dev_t device, sector_t offset);
> @@ -564,6 +565,10 @@ static inline void swap_free(swp_entry_t swp)
>  {
>  }
>
> +void swap_free_nr(swp_entry_t entry, int nr_pages)
> +{
> +}
> +
>  static inline void put_swap_folio(struct folio *folio, swp_entry_t swp)
>  {
>  }
> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index 28642c188c93..f4c65aeb088d 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c
> @@ -1356,6 +1356,57 @@ void swap_free(swp_entry_t entry)
>  		__swap_entry_free(p, entry);
>  }
>
> +/*
> + * Free up the maximum number of swap entries at once to limit the
> + * maximum kernel stack usage.
> + */
> +#define SWAP_BATCH_NR (SWAPFILE_CLUSTER > 512 ? 512 : SWAPFILE_CLUSTER)
> +
> +/*
> + * Called after swapping in a large folio, batched free swap entries
> + * for this large folio, entry should be for the first subpage and
> + * its offset is aligned with nr_pages
> + */
> +void swap_free_nr(swp_entry_t entry, int nr_pages)
> +{
> +	int i, j;
> +	struct swap_cluster_info *ci;
> +	struct swap_info_struct *p;
> +	unsigned int type = swp_type(entry);
> +	unsigned long offset = swp_offset(entry);
> +	int batch_nr, remain_nr;
> +	DECLARE_BITMAP(usage, SWAP_BATCH_NR) = { 0 };
> +
> +	/* all swap entries are within a cluster for mTHP */
> +	VM_BUG_ON(offset % SWAPFILE_CLUSTER + nr_pages > SWAPFILE_CLUSTER);
> +
> +	if (nr_pages == 1) {
> +		swap_free(entry);
> +		return;
> +	}
> +
> +	remain_nr = nr_pages;
> +	p = _swap_info_get(entry);
> +	if (p) {

nit: perhaps return early with if (!p)? Then you can dedent the for()
block. (First sketch at the bottom of this mail.)

> +		for (i = 0; i < nr_pages; i += batch_nr) {
> +			batch_nr = min_t(int, SWAP_BATCH_NR, remain_nr);
> +
> +			ci = lock_cluster_or_swap_info(p, offset);
> +			for (j = 0; j < batch_nr; j++) {
> +				if (__swap_entry_free_locked(p, offset + i * SWAP_BATCH_NR + j, 1))
> +					__bitmap_set(usage, j, 1);
> +			}
> +			unlock_cluster_or_swap_info(p, ci);
> +
> +			for_each_clear_bit(j, usage, batch_nr)
> +				free_swap_slot(swp_entry(type, offset + i * SWAP_BATCH_NR + j));
> +

nit: perhaps change to for (;;), and do the checks here to avoid
clearing the bitmap on the last run:

	i += batch_nr;
	if (i >= nr_pages)
		break;

(Second sketch at the bottom of this mail.)

> +			bitmap_clear(usage, 0, SWAP_BATCH_NR);
> +			remain_nr -= batch_nr;
> +		}
> +	}
> +}
> +
>  /*
>   * Called after dropping swapcache to decrease refcnt to swap entries.
>   */
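
First sketch, for the early-return nit. Untested and only meant to show
the shape; the loop body is unchanged apart from losing one level of
indentation:

	remain_nr = nr_pages;
	p = _swap_info_get(entry);
	if (!p)
		return;

	/* for() block now sits one tab to the left; body as in the patch */
	for (i = 0; i < nr_pages; i += batch_nr) {
		...
	}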
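
Second sketch, for the for (;;) nit, folding the early return in as well
(again untested). One thing I noticed while writing it: since i already
advances in units of pages, (offset + i + j) looks like the intended
index; the patch's (offset + i * SWAP_BATCH_NR + j) would only differ
(and overshoot) once SWAPFILE_CLUSTER > 512 makes a second batch
possible, so please double-check that:

	remain_nr = nr_pages;
	p = _swap_info_get(entry);
	if (!p)
		return;

	i = 0;
	for (;;) {
		batch_nr = min_t(int, SWAP_BATCH_NR, remain_nr);

		ci = lock_cluster_or_swap_info(p, offset);
		for (j = 0; j < batch_nr; j++) {
			if (__swap_entry_free_locked(p, offset + i + j, 1))
				__bitmap_set(usage, j, 1);
		}
		unlock_cluster_or_swap_info(p, ci);

		for_each_clear_bit(j, usage, batch_nr)
			free_swap_slot(swp_entry(type, offset + i + j));

		/* done? then we skip clearing the bitmap on the final run */
		i += batch_nr;
		if (i >= nr_pages)
			break;

		bitmap_clear(usage, 0, SWAP_BATCH_NR);
		remain_nr -= batch_nr;
	}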
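
And for completeness, a hypothetical sketch of the intended call site,
once a swap-in path has brought in a whole large folio; folio here just
stands for the freshly swapped-in large folio:

	/*
	 * After swapping in a large folio, free all of its swap entries
	 * with one batched call rather than one swap_free() per subpage.
	 * entry is for the first subpage, and its offset is assumed to
	 * be aligned to nr, as the function's comment requires.
	 */
	swp_entry_t entry = folio->swap;
	int nr = folio_nr_pages(folio);

	swap_free_nr(entry, nr);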