Use lruvec locking to replace the pgdat lru_lock, and unfold
compact_unlock_should_abort() to fit the replacement.

Signed-off-by: Alex Shi <alex.shi@xxxxxxxxxxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Qian Cai <cai@xxxxxx>
Cc: Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Tejun Heo <tj@xxxxxxxxxx>
Cc: cgroups@xxxxxxxxxxxxxxx
Cc: linux-mm@xxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
---
 mm/compaction.c | 48 ++++++++++++++++++++++++++++++------------------
 1 file changed, 30 insertions(+), 18 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index 9a737f343183..8877f38410d8 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -785,7 +785,7 @@ static bool too_many_isolated(pg_data_t *pgdat)
 	unsigned long nr_scanned = 0, nr_isolated = 0;
 	struct lruvec *lruvec;
 	unsigned long flags = 0;
-	bool locked = false;
+	struct lruvec *locked_lruvec = NULL;
 	struct page *page = NULL, *valid_page = NULL;
 	unsigned long start_pfn = low_pfn;
 	bool skip_on_failure = false;
@@ -845,11 +845,20 @@ static bool too_many_isolated(pg_data_t *pgdat)
		 * contention, to give chance to IRQs. Abort completely if
		 * a fatal signal is pending.
		 */
-		if (!(low_pfn % SWAP_CLUSTER_MAX)
-		    && compact_unlock_should_abort(&pgdat->lruvec.lru_lock,
-					flags, &locked, cc)) {
-			low_pfn = 0;
-			goto fatal_pending;
+		if (!(low_pfn % SWAP_CLUSTER_MAX)) {
+			if (locked_lruvec) {
+				spin_unlock_irqrestore(&locked_lruvec->lru_lock, flags);
+				locked_lruvec = NULL;
+			}
+
+			if (fatal_signal_pending(current)) {
+				cc->contended = true;
+
+				low_pfn = 0;
+				goto fatal_pending;
+			}
+
+			cond_resched();
 		}

 		if (!pfn_valid_within(low_pfn))
@@ -918,10 +927,10 @@ static bool too_many_isolated(pg_data_t *pgdat)
		 */
		if (unlikely(__PageMovable(page)) &&
					!PageIsolated(page)) {
-			if (locked) {
-				spin_unlock_irqrestore(&pgdat->lruvec.lru_lock,
+			if (locked_lruvec) {
+				spin_unlock_irqrestore(&locked_lruvec->lru_lock,
							flags);
-				locked = false;
+				locked_lruvec = NULL;
			}

			if (!isolate_movable_page(page, isolate_mode))
@@ -947,10 +956,14 @@ static bool too_many_isolated(pg_data_t *pgdat)
		if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
			goto isolate_fail;

+		lruvec = mem_cgroup_page_lruvec(page, pgdat);
+
		/* If we already hold the lock, we can skip some rechecking */
-		if (!locked) {
-			locked = compact_lock_irqsave(&pgdat->lruvec.lru_lock,
-							&flags, cc);
+		if (lruvec != locked_lruvec) {
+			if (compact_lock_irqsave(&lruvec->lru_lock, &flags, cc))
+				locked_lruvec = lruvec;
+
+			sync_lruvec_pgdat(lruvec, pgdat);

			/* Try get exclusive access under lock */
			if (!skip_updated) {
@@ -974,7 +987,6 @@ static bool too_many_isolated(pg_data_t *pgdat)
			}
		}

-		lruvec = mem_cgroup_page_lruvec(page, pgdat);

		/* Try isolate the page */
		if (__isolate_lru_page(page, isolate_mode) != 0)
@@ -1015,9 +1027,9 @@ static bool too_many_isolated(pg_data_t *pgdat)
		 * page anyway.
		 */
		if (nr_isolated) {
-			if (locked) {
-				spin_unlock_irqrestore(&pgdat->lruvec.lru_lock, flags);
-				locked = false;
+			if (locked_lruvec) {
+				spin_unlock_irqrestore(&locked_lruvec->lru_lock, flags);
+				locked_lruvec = NULL;
			}
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
@@ -1042,8 +1054,8 @@ static bool too_many_isolated(pg_data_t *pgdat)
	low_pfn = end_pfn;

 isolate_abort:
-	if (locked)
-		spin_unlock_irqrestore(&pgdat->lruvec.lru_lock, flags);
+	if (locked_lruvec)
+		spin_unlock_irqrestore(&locked_lruvec->lru_lock, flags);

	/*
	 * Updated the cached scanner pfn once the pageblock has been scanned
--
1.8.3.1
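
For readers following the locking change, the relocking pattern this patch
introduces can be summarized outside the kernel as below. This is a minimal
user-space sketch, not kernel code: struct lruvec, struct page, page_lruvec()
and scan_pages() here are illustrative stand-ins (the kernel uses an irq-safe
spinlock via compact_lock_irqsave()/spin_unlock_irqrestore() and looks the
lruvec up with mem_cgroup_page_lruvec()). What it demonstrates is the core
idea of the patch: the scanner holds at most one lruvec lock, reuses it while
consecutive pages map to the same lruvec, and only drops and retakes it when
the lruvec changes.

	/*
	 * Minimal sketch of the per-lruvec relocking pattern.
	 * All names are illustrative stand-ins, not kernel APIs:
	 * pthread_mutex_t stands in for the irq-safe spinlock, and
	 * page_lruvec() for mem_cgroup_page_lruvec(page, pgdat).
	 */
	#include <pthread.h>
	#include <stddef.h>

	struct lruvec {
		pthread_mutex_t lru_lock;
	};

	struct page {
		struct lruvec *lruvec;	/* LRU list this page lives on */
	};

	static struct lruvec *page_lruvec(struct page *page)
	{
		return page->lruvec;
	}

	static void scan_pages(struct page **pages, size_t nr)
	{
		struct lruvec *locked_lruvec = NULL;	/* at most one lock held */
		size_t i;

		for (i = 0; i < nr; i++) {
			struct lruvec *lruvec = page_lruvec(pages[i]);

			/*
			 * Relock only when the lruvec changes; consecutive
			 * pages from the same lruvec reuse the held lock.
			 */
			if (lruvec != locked_lruvec) {
				if (locked_lruvec)
					pthread_mutex_unlock(&locked_lruvec->lru_lock);
				pthread_mutex_lock(&lruvec->lru_lock);
				locked_lruvec = lruvec;
			}

			/* ... isolate pages[i] under lruvec->lru_lock ... */
		}

		if (locked_lruvec)
			pthread_mutex_unlock(&locked_lruvec->lru_lock);
	}

Compared with the old code, where the single pgdat lru_lock serialized every
scanner on the node, lock hold times stay the same but contention is split
across the per-memcg, per-node lruvecs, which is the motivation for the series.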