Factor out the actual swap entry freeing logic to a new helper, __swap_entries_free(). This allows us to further simplify other swap entry freeing code by leveraging the __swap_entries_free() helper function. Signed-off-by: Kemeng Shi <shikemeng@xxxxxxxxxxxxxxx> --- mm/swapfile.c | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index 5a775456e26c..7c886f9dd6f9 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1347,6 +1347,25 @@ static struct swap_info_struct *_swap_info_get(swp_entry_t entry) return NULL; } +static inline void __swap_entries_free(struct swap_info_struct *si, + struct swap_cluster_info *ci, + swp_entry_t entry, unsigned int nr_pages) +{ + unsigned long offset = swp_offset(entry); + + VM_BUG_ON(cluster_is_empty(ci)); + VM_BUG_ON(ci->count < nr_pages); + + ci->count -= nr_pages; + mem_cgroup_uncharge_swap(entry, nr_pages); + swap_range_free(si, offset, nr_pages); + + if (!ci->count) + free_cluster(si, ci); + else + partial_free_cluster(si, ci); +} + static unsigned char swap_entry_put_locked(struct swap_info_struct *si, unsigned long offset, unsigned char usage) @@ -1525,22 +1544,13 @@ static void swap_entry_range_free(struct swap_info_struct *si, /* It should never free entries across different clusters */ VM_BUG_ON(ci != offset_to_cluster(si, offset + nr_pages - 1)); - VM_BUG_ON(cluster_is_empty(ci)); - VM_BUG_ON(ci->count < nr_pages); - ci->count -= nr_pages; do { VM_BUG_ON(*map != SWAP_HAS_CACHE); *map = 0; } while (++map < map_end); - mem_cgroup_uncharge_swap(entry, nr_pages); - swap_range_free(si, offset, nr_pages); - - if (!ci->count) - free_cluster(si, ci); - else - partial_free_cluster(si, ci); + __swap_entries_free(si, ci, entry, nr_pages); } static void cluster_swap_free_nr(struct swap_info_struct *si, -- 2.30.0