After commit c60aa176c6de8 ("swapfile: swap allocation cycle if
nonrot"), swap allocation is cyclic. The current approach does this
with two separate loops, one over the upper half of the range and one
over the lower half, which looks a little redundant.

From another point of view, the scan iterates over the range
[lowest_bit, highest_bit], starting at (offset + 1) and visiting every
slot except scan_base. So we can merge the two loops into one with the
condition (next_offset() != scan_base) by introducing next_offset(),
which advances the offset and wraps it back to lowest_bit once it
passes highest_bit, keeping it in that range in the correct order.

Signed-off-by: Wei Yang <richard.weiyang@xxxxxxxxx>
CC: Hugh Dickins <hughd@xxxxxxxxxx>
---
 mm/swapfile.c | 26 +++++++++-----------------
 1 file changed, 9 insertions(+), 17 deletions(-)

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 95024f9b691a..42c5c2010bfc 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -729,6 +729,14 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
 	}
 }
 
+static unsigned long next_offset(struct swap_info_struct *si,
+				 unsigned long *offset)
+{
+	if (++(*offset) > si->highest_bit)
+		*offset = si->lowest_bit;
+	return *offset;
+}
+
 static int scan_swap_map_slots(struct swap_info_struct *si,
 			       unsigned char usage, int nr,
 			       swp_entry_t slots[])
@@ -883,7 +891,7 @@ static int scan_swap_map_slots(struct swap_info_struct *si,
 
 scan:
 	spin_unlock(&si->lock);
-	while (++offset <= si->highest_bit) {
+	while (next_offset(si, &offset) != scan_base) {
 		if (!si->swap_map[offset]) {
 			spin_lock(&si->lock);
 			goto checks;
@@ -897,22 +905,6 @@ static int scan_swap_map_slots(struct swap_info_struct *si,
 			latency_ration = LATENCY_LIMIT;
 		}
 	}
-	offset = si->lowest_bit;
-	while (offset < scan_base) {
-		if (!si->swap_map[offset]) {
-			spin_lock(&si->lock);
-			goto checks;
-		}
-		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
-			spin_lock(&si->lock);
-			goto checks;
-		}
-		if (unlikely(--latency_ration < 0)) {
-			cond_resched();
-			latency_ration = LATENCY_LIMIT;
-		}
-		offset++;
-	}
 	spin_lock(&si->lock);
 
 no_page:
-- 
2.23.0
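
To see the iteration order the merged loop produces, here is a minimal
standalone userspace sketch, not part of the patch: the struct name and
the values in main() are made up for illustration, and only the
next_offset() logic mirrors the change above.

	#include <stdio.h>

	/*
	 * Hypothetical stand-in for swap_info_struct, carrying only the
	 * two fields next_offset() touches.
	 */
	struct swap_info_sketch {
		unsigned long lowest_bit;
		unsigned long highest_bit;
	};

	/* Same logic as the patch: advance, wrapping back to lowest_bit. */
	static unsigned long next_offset(struct swap_info_sketch *si,
					 unsigned long *offset)
	{
		if (++(*offset) > si->highest_bit)
			*offset = si->lowest_bit;
		return *offset;
	}

	int main(void)
	{
		/* Made-up range [3, 9] with scan_base = 6. */
		struct swap_info_sketch si = { .lowest_bit = 3, .highest_bit = 9 };
		unsigned long scan_base = 6;
		unsigned long offset = scan_base;

		/* Prints "7 8 9 3 4 5": every slot except scan_base, once. */
		while (next_offset(&si, &offset) != scan_base)
			printf("%lu ", offset);
		printf("\n");

		return 0;
	}

Starting at scan_base + 1 and wrapping at highest_bit, the single loop
covers exactly the slots the two original loops covered together. Note
that the termination condition relies on scan_base itself lying within
[lowest_bit, highest_bit].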