count all pages because many pages might be off LRU already.

Signed-off-by: Michal Hocko <mhocko@xxxxxxx>
---
 mm/swap.c | 21 ++++++++++-----------
 1 file changed, 10 insertions(+), 11 deletions(-)

diff --git a/mm/swap.c b/mm/swap.c
index 39affa1932ce..8a12b33936b4 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -911,13 +911,22 @@ void release_pages(struct page **pages, int nr, bool cold)
 		if (unlikely(PageCompound(page))) {
 			if (zone) {
 				spin_unlock_irqrestore(&zone->lru_lock, flags);
-				lock_batch = 0;
 				zone = NULL;
 			}
 			put_compound_page(page);
 			continue;
 		}
 
+		/*
+		 * Make sure the IRQ-safe lock-holding time does not get
+		 * excessive with a continuous string of pages from the
+		 * same zone. The lock is held only if zone != NULL.
+		 */
+		if (zone && ++lock_batch == SWAP_CLUSTER_MAX) {
+			spin_unlock_irqrestore(&zone->lru_lock, flags);
+			zone = NULL;
+		}
+
 		if (!put_page_testzero(page))
 			continue;
 
@@ -937,16 +946,6 @@ void release_pages(struct page **pages, int nr, bool cold)
 			VM_BUG_ON_PAGE(!PageLRU(page), page);
 			__ClearPageLRU(page);
 			del_page_from_lru_list(page, lruvec, page_off_lru(page));
-
-			/*
-			 * Make sure the IRQ-safe lock-holding time
-			 * does not get excessive with a continuous
-			 * string of pages from the same zone.
-			 */
-			if (++lock_batch == SWAP_CLUSTER_MAX) {
-				spin_unlock_irqrestore(&zone->lru_lock, flags);
-				zone = NULL;
-			}
 		}
 
 		/* Clear Active bit in case of parallel mark_page_accessed */
-- 
2.1.1
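
For readers outside mm/, the hunk above moves the batch check so that every page
counts toward the lock-drop threshold, not only pages that were still on the LRU.
Below is a minimal user-space sketch of that pattern, not the kernel code itself:
the names BATCH_MAX, NR_ITEMS and the "locked" flag are hypothetical, and a
pthread mutex stands in for the IRQ-safe zone->lru_lock.

/*
 * Sketch of periodically dropping and reacquiring a lock so the hold
 * time stays bounded even for long runs of items. Hypothetical names;
 * a pthread mutex stands in for the IRQ-safe zone->lru_lock and
 * BATCH_MAX for SWAP_CLUSTER_MAX.
 */
#include <pthread.h>
#include <stdio.h>

#define BATCH_MAX	32
#define NR_ITEMS	100

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
	int lock_batch = 0;
	int locked = 0;		/* plays the role of "zone != NULL" */
	int i;

	for (i = 0; i < NR_ITEMS; i++) {
		/*
		 * Count every item toward the batch, mirroring the patch:
		 * the check runs before any per-item early exits, so the
		 * lock is dropped after at most BATCH_MAX items.
		 */
		if (locked && ++lock_batch == BATCH_MAX) {
			pthread_mutex_unlock(&lock);
			locked = 0;
		}

		if (!locked) {
			pthread_mutex_lock(&lock);
			locked = 1;
			lock_batch = 0;	/* reset on (re)acquire */
		}

		/* ... work that needs the lock, e.g. list removal ... */
	}

	if (locked)
		pthread_mutex_unlock(&lock);

	printf("processed %d items in batches of %d\n", NR_ITEMS, BATCH_MAX);
	return 0;
}

The point of counting unconditionally is the same as in the patch: if most items
take an early "continue" before reaching the locked work, a counter placed only
on the locked path would let the lock be held across an unbounded stretch of the
array.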