The quilt patch titled
     Subject: mm/swap: add helper swap_offset_available()
has been removed from the -mm tree.  Its filename was
     mm-swap-add-helper-swap_offset_available.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: Miaohe Lin <linmiaohe@xxxxxxxxxx>
Subject: mm/swap: add helper swap_offset_available()

Add helper swap_offset_available() to remove some duplicated code.  Minor
readability improvement.

[akpm@xxxxxxxxxxxxxxxxxxxx: s/swap_offset_available/swap_offset_available_and_locked/, per Neil]
Link: https://lkml.kernel.org/r/20220509131416.17553-12-linmiaohe@xxxxxxxxxx
Signed-off-by: Miaohe Lin <linmiaohe@xxxxxxxxxx>
Cc: Alistair Popple <apopple@xxxxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: David Howells <dhowells@xxxxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Naoya Horiguchi <naoya.horiguchi@xxxxxxx>
Cc: NeilBrown <neilb@xxxxxxx>
Cc: Peter Xu <peterx@xxxxxxxxxx>
Cc: Suren Baghdasaryan <surenb@xxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Oscar Salvador <osalvador@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/swapfile.c |   34 ++++++++++++++++++----------------
 1 file changed, 18 insertions(+), 16 deletions(-)

--- a/mm/swapfile.c~mm-swap-add-helper-swap_offset_available
+++ a/mm/swapfile.c
@@ -775,6 +775,22 @@ static void set_cluster_next(struct swap
 	this_cpu_write(*si->cluster_next_cpu, next);
 }
 
+static bool swap_offset_available_and_locked(struct swap_info_struct *si,
+					     unsigned long offset)
+{
+	if (data_race(!si->swap_map[offset])) {
+		spin_lock(&si->lock);
+		return true;
+	}
+
+	if (vm_swap_full() && READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
+		spin_lock(&si->lock);
+		return true;
+	}
+
+	return false;
+}
+
 static int scan_swap_map_slots(struct swap_info_struct *si,
 			       unsigned char usage, int nr,
 			       swp_entry_t slots[])
@@ -952,15 +968,8 @@ done:
 scan:
 	spin_unlock(&si->lock);
 	while (++offset <= READ_ONCE(si->highest_bit)) {
-		if (data_race(!si->swap_map[offset])) {
-			spin_lock(&si->lock);
-			goto checks;
-		}
-		if (vm_swap_full() &&
-		    READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
-			spin_lock(&si->lock);
+		if (swap_offset_available_and_locked(si, offset))
 			goto checks;
-		}
 		if (unlikely(--latency_ration < 0)) {
 			cond_resched();
 			latency_ration = LATENCY_LIMIT;
@@ -969,15 +978,8 @@ scan:
 	}
 	offset = si->lowest_bit;
 	while (offset < scan_base) {
-		if (data_race(!si->swap_map[offset])) {
-			spin_lock(&si->lock);
+		if (swap_offset_available_and_locked(si, offset))
 			goto checks;
-		}
-		if (vm_swap_full() &&
-		    READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
-			spin_lock(&si->lock);
-			goto checks;
-		}
 		if (unlikely(--latency_ration < 0)) {
 			cond_resched();
 			latency_ration = LATENCY_LIMIT;
_

Patches currently in -mm which might be from linmiaohe@xxxxxxxxxx are

mm-z3fold-fix-sheduling-while-atomic.patch
mm-z3fold-fix-possible-null-pointer-dereferencing.patch
mm-z3fold-remove-buggy-use-of-stale-list-for-allocation.patch
mm-z3fold-throw-warning-on-failure-of-trylock_page-in-z3fold_alloc.patch
revert-mm-z3foldc-allow-__gfp_highmem-in-z3fold_alloc.patch
mm-z3fold-put-z3fold-page-back-into-unbuddied-list-when-reclaim-or-migration-fails.patch
mm-z3fold-always-clear-page_claimed-under-z3fold-page-lock.patch
mm-z3fold-fix-z3fold_reclaim_page-races-with-z3fold_free.patch
mm-z3fold-fix-z3fold_page_migrate-races-with-z3fold_map.patch
mm-swapfile-unuse_pte-can-map-random-data-if-swap-read-fails.patch
mm-swapfile-fix-lost-swap-bits-in-unuse_pte.patch
mm-madvise-free-hwpoison-and-swapin-error-entry-in-madvise_free_pte_range.patch
mm-shmem-fix-infinite-loop-when-swap-in-shmem-error-at-swapoff-time.patch
mm-filter-out-swapin-error-entry-in-shmem-mapping.patch
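
For readers who want to experiment with the refactoring pattern outside the
kernel tree, here is a minimal, self-contained sketch of the same idea: the
duplicated "check the slot, then take the lock" sequence from two scan loops
is folded into one bool-returning helper that returns with the lock held on
success.  The names slot_table, slot_available_and_locked() and
scan_for_free_slot() are hypothetical stand-ins, not kernel code, and the
sketch omits the vm_swap_full()/SWAP_HAS_CACHE case handled by the real
helper shown in the diff above.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's swap_info_struct. */
struct slot_table {
	pthread_mutex_t lock;
	unsigned char *map;	/* 0 means the slot is free */
	unsigned long nr_slots;
};

/*
 * Return true with t->lock held if @offset looks free, false otherwise.
 * This mirrors the shape of swap_offset_available_and_locked(): the caller
 * only tests the return value and already owns the lock on success.
 */
static bool slot_available_and_locked(struct slot_table *t, unsigned long offset)
{
	if (!t->map[offset]) {
		pthread_mutex_lock(&t->lock);
		return true;
	}
	return false;
}

/* The kernel has two scan loops sharing the helper; one loop is enough here. */
static long scan_for_free_slot(struct slot_table *t)
{
	for (unsigned long offset = 0; offset < t->nr_slots; offset++) {
		if (slot_available_and_locked(t, offset)) {
			/* Re-check under the lock before claiming the slot. */
			if (!t->map[offset]) {
				t->map[offset] = 1;
				pthread_mutex_unlock(&t->lock);
				return (long)offset;
			}
			pthread_mutex_unlock(&t->lock);
		}
	}
	return -1;
}

int main(void)
{
	unsigned char map[4] = { 1, 1, 0, 0 };
	struct slot_table t = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.map = map,
		.nr_slots = 4,
	};

	printf("claimed slot %ld\n", scan_for_free_slot(&t));	/* prints 2 */
	return 0;
}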