From: Huang Ying <ying.huang@xxxxxxxxx>

In this patch, the locking-related code is shared between the huge and
normal code paths in put_swap_page() to reduce code duplication.  The
`free_entries == 0` case is merged into the more general
`free_entries != SWAPFILE_CLUSTER` case, because the new locking method
makes that easy.

The number of added lines is the same as the number of removed lines,
but the code size increases when CONFIG_TRANSPARENT_HUGEPAGE=n.

              text    data     bss     dec     hex filename
base:        24215    2028     340   26583    67d7 mm/swapfile.o
unified:     24577    2028     340   26945    6941 mm/swapfile.o

Signed-off-by: "Huang, Ying" <ying.huang@xxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Shaohua Li <shli@xxxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Minchan Kim <minchan@xxxxxxxxxx>
Cc: Rik van Riel <riel@xxxxxxxxxx>
Cc: Daniel Jordan <daniel.m.jordan@xxxxxxxxxx>
Cc: Dan Williams <dan.j.williams@xxxxxxxxx>
---
 mm/swapfile.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/mm/swapfile.c b/mm/swapfile.c
index bc488bf36c86..17dce780b4c8 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1280,8 +1280,8 @@ void put_swap_page(struct page *page, swp_entry_t entry)
 	if (!si)
 		return;
 
+	ci = lock_cluster_or_swap_info(si, offset);
 	if (nr == SWAPFILE_CLUSTER) {
-		ci = lock_cluster(si, offset);
 		VM_BUG_ON(!cluster_is_huge(ci));
 		map = si->swap_map + offset;
 		for (i = 0; i < SWAPFILE_CLUSTER; i++) {
@@ -1290,13 +1290,9 @@ void put_swap_page(struct page *page, swp_entry_t entry)
 			if (val == SWAP_HAS_CACHE)
 				free_entries++;
 		}
-		if (!free_entries) {
-			for (i = 0; i < SWAPFILE_CLUSTER; i++)
-				map[i] &= ~SWAP_HAS_CACHE;
-		}
 		cluster_clear_huge(ci);
-		unlock_cluster(ci);
 		if (free_entries == SWAPFILE_CLUSTER) {
+			unlock_cluster_or_swap_info(si, ci);
 			spin_lock(&si->lock);
 			ci = lock_cluster(si, offset);
 			memset(map, 0, SWAPFILE_CLUSTER);
@@ -1307,12 +1303,16 @@ void put_swap_page(struct page *page, swp_entry_t entry)
 			return;
 		}
 	}
-	if (nr == 1 || free_entries) {
-		for (i = 0; i < nr; i++, entry.val++) {
-			if (!__swap_entry_free(si, entry, SWAP_HAS_CACHE))
-				free_swap_slot(entry);
+	for (i = 0; i < nr; i++, entry.val++) {
+		if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) {
+			unlock_cluster_or_swap_info(si, ci);
+			free_swap_slot(entry);
+			if (i == nr - 1)
+				return;
+			lock_cluster_or_swap_info(si, offset);
 		}
 	}
+	unlock_cluster_or_swap_info(si, ci);
 }
 
 #ifdef CONFIG_THP_SWAP
-- 
2.16.4
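
For reference, below is a minimal user-space sketch of the locking pattern
the new loop relies on, not part of the patch itself.  The names
(put_slots(), drop_ref_locked(), slow_free(), device_lock) are hypothetical
stand-ins: a pthread mutex plays the role of the cluster/swap_info lock,
drop_ref_locked() stands in for __swap_entry_free_locked(), and slow_free()
stands in for free_swap_slot(), which must not be called with the lock held.
The pattern is: take the lock once, drop it only around the slow free path,
and re-take it unless the last iteration has already released it.

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define NR_SLOTS 4

	/* Hypothetical stand-in for the cluster/swap_info lock. */
	static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Model of a swap-count map: nonzero means still referenced. */
	static int slot_count[NR_SLOTS] = { 0, 2, 0, 1 };

	/* Stand-in for __swap_entry_free_locked(): drop one reference and
	 * report whether any remain.  Caller must hold the lock. */
	static bool drop_ref_locked(int slot)
	{
		if (slot_count[slot] > 0)
			slot_count[slot]--;
		return slot_count[slot] != 0;
	}

	/* Stand-in for free_swap_slot(): may block, so it is called
	 * without the lock held. */
	static void slow_free(int slot)
	{
		printf("freeing slot %d outside the lock\n", slot);
	}

	/* Mirrors the loop at the end of the patched put_swap_page():
	 * lock once, drop the lock only around the slow path, and unlock
	 * exactly once at the end unless the final iteration already
	 * returned with the lock dropped. */
	static void put_slots(int first, int nr)
	{
		pthread_mutex_lock(&device_lock);
		for (int i = 0; i < nr; i++) {
			if (!drop_ref_locked(first + i)) {
				pthread_mutex_unlock(&device_lock);
				slow_free(first + i);
				if (i == nr - 1)
					return;	/* lock already dropped */
				pthread_mutex_lock(&device_lock);
			}
		}
		pthread_mutex_unlock(&device_lock);
	}

	int main(void)
	{
		put_slots(0, NR_SLOTS);
		return 0;
	}

The `i == nr - 1` check is what lets the common path pay for a single
lock/unlock pair while still never calling the slow free path under the
lock, which is the point of sharing lock_cluster_or_swap_info() between
the huge and normal paths above.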