The patch titled
     Subject: mm/swapfile.c: unify normal/huge code path in put_swap_page()
has been added to the -mm tree.  Its filename is
     swap-unify-normal-huge-code-path-in-put_swap_page.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/swap-unify-normal-huge-code-path-in-put_swap_page.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/swap-unify-normal-huge-code-path-in-put_swap_page.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Huang Ying <ying.huang@xxxxxxxxx>
Subject: mm/swapfile.c: unify normal/huge code path in put_swap_page()

In this patch, the normal/huge code paths in put_swap_page() and several
helper functions are unified to avoid duplicated code and bugs, and to
make the code easier to review.

More lines are removed than added, and the binary size is kept exactly
the same when CONFIG_TRANSPARENT_HUGEPAGE=n.

Link: http://lkml.kernel.org/r/20180720071845.17920-6-ying.huang@xxxxxxxxx
Signed-off-by: "Huang, Ying" <ying.huang@xxxxxxxxx>
Suggested-by: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Acked-by: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Reviewed-by: Daniel Jordan <daniel.m.jordan@xxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Shaohua Li <shli@xxxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Minchan Kim <minchan@xxxxxxxxxx>
Cc: Rik van Riel <riel@xxxxxxxxxx>
Cc: Dan Williams <dan.j.williams@xxxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/swapfile.c |   83 +++++++++++++++++++++---------------------------
 1 file changed, 37 insertions(+), 46 deletions(-)

diff -puN mm/swapfile.c~swap-unify-normal-huge-code-path-in-put_swap_page mm/swapfile.c
--- a/mm/swapfile.c~swap-unify-normal-huge-code-path-in-put_swap_page
+++ a/mm/swapfile.c
@@ -204,8 +204,16 @@ static void discard_swap_cluster(struct
 
 #ifdef CONFIG_THP_SWAP
 #define SWAPFILE_CLUSTER	HPAGE_PMD_NR
+
+#define swap_entry_size(size)	(size)
 #else
 #define SWAPFILE_CLUSTER	256
+
+/*
+ * Define swap_entry_size() as constant to let compiler to optimize
+ * out some code if !CONFIG_THP_SWAP
+ */
+#define swap_entry_size(size)	1
 #endif
 #define LATENCY_LIMIT		256
 
@@ -1192,18 +1200,7 @@ void swap_free(swp_entry_t entry)
 /*
  * Called after dropping swapcache to decrease refcnt to swap entries.
  */
-static void swapcache_free(swp_entry_t entry)
-{
-	struct swap_info_struct *p;
-
-	p = _swap_info_get(entry);
-	if (p) {
-		if (!__swap_entry_free(p, entry, SWAP_HAS_CACHE))
-			free_swap_slot(entry);
-	}
-}
-
-static void swapcache_free_cluster(swp_entry_t entry)
+void put_swap_page(struct page *page, swp_entry_t entry)
 {
 	unsigned long offset = swp_offset(entry);
 	unsigned long idx = offset / SWAPFILE_CLUSTER;
@@ -1212,39 +1209,41 @@ static void swapcache_free_cluster(swp_e
 	unsigned char *map;
 	unsigned int i, free_entries = 0;
 	unsigned char val;
-
-	if (!IS_ENABLED(CONFIG_THP_SWAP))
-		return;
+	int size = swap_entry_size(hpage_nr_pages(page));
 
 	si = _swap_info_get(entry);
 	if (!si)
 		return;
 
-	ci = lock_cluster(si, offset);
-	VM_BUG_ON(!cluster_is_huge(ci));
-	map = si->swap_map + offset;
-	for (i = 0; i < SWAPFILE_CLUSTER; i++) {
-		val = map[i];
-		VM_BUG_ON(!(val & SWAP_HAS_CACHE));
-		if (val == SWAP_HAS_CACHE)
-			free_entries++;
-	}
-	if (!free_entries) {
-		for (i = 0; i < SWAPFILE_CLUSTER; i++)
-			map[i] &= ~SWAP_HAS_CACHE;
-	}
-	cluster_clear_huge(ci);
-	unlock_cluster(ci);
-	if (free_entries == SWAPFILE_CLUSTER) {
-		spin_lock(&si->lock);
+	if (size == SWAPFILE_CLUSTER) {
 		ci = lock_cluster(si, offset);
-		memset(map, 0, SWAPFILE_CLUSTER);
+		VM_BUG_ON(!cluster_is_huge(ci));
+		map = si->swap_map + offset;
+		for (i = 0; i < SWAPFILE_CLUSTER; i++) {
+			val = map[i];
+			VM_BUG_ON(!(val & SWAP_HAS_CACHE));
+			if (val == SWAP_HAS_CACHE)
+				free_entries++;
+		}
+		if (!free_entries) {
+			for (i = 0; i < SWAPFILE_CLUSTER; i++)
+				map[i] &= ~SWAP_HAS_CACHE;
+		}
+		cluster_clear_huge(ci);
 		unlock_cluster(ci);
-		mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
-		swap_free_cluster(si, idx);
-		spin_unlock(&si->lock);
-	} else if (free_entries) {
-		for (i = 0; i < SWAPFILE_CLUSTER; i++, entry.val++) {
+		if (free_entries == SWAPFILE_CLUSTER) {
+			spin_lock(&si->lock);
+			ci = lock_cluster(si, offset);
+			memset(map, 0, SWAPFILE_CLUSTER);
+			unlock_cluster(ci);
+			mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
+			swap_free_cluster(si, idx);
+			spin_unlock(&si->lock);
+			return;
+		}
+	}
+	if (size == 1 || free_entries) {
+		for (i = 0; i < size; i++, entry.val++) {
 			if (!__swap_entry_free(si, entry, SWAP_HAS_CACHE))
 				free_swap_slot(entry);
 		}
@@ -1268,14 +1267,6 @@ int split_swap_cluster(swp_entry_t entry
 }
 #endif
 
-void put_swap_page(struct page *page, swp_entry_t entry)
-{
-	if (!PageTransHuge(page))
-		swapcache_free(entry);
-	else
-		swapcache_free_cluster(entry);
-}
-
 static int swp_entry_cmp(const void *ent1, const void *ent2)
 {
 	const swp_entry_t *e1 = ent1, *e2 = ent2;
_
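A side note on the swap_entry_size() trick used above: because the macro
expands to the constant 1 when !CONFIG_THP_SWAP, the compiler can prove
that the unified put_swap_page()'s "size == SWAPFILE_CLUSTER" test is
always false and drop the huge-cluster branch as dead code, which is why
the changelog can say the binary size is unchanged.  Below is a minimal
user-space sketch of the same compile-time-constant technique; the names
(CONFIG_THP_DEMO, entry_size(), put_entries()) are illustrative only and
are not kernel APIs.

#include <stdio.h>

#define CLUSTER_SIZE	256

/* Uncomment to mimic a CONFIG_THP_SWAP=y build. */
/* #define CONFIG_THP_DEMO */

#ifdef CONFIG_THP_DEMO
#define entry_size(n)	(n)	/* huge entries possible: keep the runtime value */
#else
/*
 * A compile-time constant: the compiler can prove (size == CLUSTER_SIZE)
 * below is always false and eliminate the whole huge-cluster branch.
 */
#define entry_size(n)	1
#endif

static void put_entries(int nr_pages)
{
	int i;
	int size = entry_size(nr_pages);

	if (size == CLUSTER_SIZE) {
		/* "huge" path: dead code when entry_size() expands to 1 */
		printf("free a whole cluster of %d entries\n", CLUSTER_SIZE);
		return;
	}
	/* "normal" path: free the entries one by one */
	for (i = 0; i < size; i++)
		printf("free entry %d\n", i);
}

int main(void)
{
	put_entries(1);			/* normal page */
	put_entries(CLUSTER_SIZE);	/* huge page; only meaningful with CONFIG_THP_DEMO */
	return 0;
}

Compiling this with and without CONFIG_THP_DEMO defined and comparing the
generated code (for example with objdump -d) shows the cluster branch
vanishes in the constant-1 build, mirroring the "binary size is kept
exactly the same" observation in the changelog.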
Patches currently in -mm which might be from ying.huang@xxxxxxxxx are

mm-clear_huge_page-move-order-algorithm-into-a-separate-function.patch
mm-huge-page-copy-target-sub-page-last-when-copy-huge-page.patch
mm-hugetlbfs-rename-address-to-haddr-in-hugetlb_cow.patch
mm-hugetlbfs-pass-fault-address-to-cow-handler.patch
mm-swap-make-config_thp_swap-depends-on-config_swap.patch
swap-add-comments-to-lock_cluster_or_swap_info.patch
mm-swapfilec-replace-some-ifdef-with-is_enabled.patch
swap-use-swap_count-in-swap_page_trans_huge_swapped.patch
swap-unify-normal-huge-code-path-in-swap_page_trans_huge_swapped.patch
swap-unify-normal-huge-code-path-in-put_swap_page.patch
swap-get_swap_pages-use-entry_size-instead-of-cluster-in-parameter.patch
swap-add-__swap_entry_free_locked.patch
swap-put_swap_page-share-more-between-huge-normal-code-path.patch
mm-swap-fix-race-between-swapoff-and-some-swap-operations.patch
mm-swap-fix-race-between-swapoff-and-some-swap-operations-v6.patch
mm-fix-race-between-swapoff-and-mincore.patch

--
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html