Currently memcg still uses the per-node pgdat->lru_lock to guard its
lruvec. That causes lru_lock contention on systems with a high container
density. Using a per-lruvec lock instead could relieve much of that
contention. Later patches will replace pgdat->lru_lock with
lruvec->lru_lock and show the performance benefit in benchmarks.

Signed-off-by: Alex Shi <alex.shi@xxxxxxxxxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Dan Williams <dan.j.williams@xxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Cc: Wei Yang <richard.weiyang@xxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Arun KS <arunks@xxxxxxxxxxxxxx>
Cc: Konstantin Khlebnikov <khlebnikov@xxxxxxxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: cgroups@xxxxxxxxxxxxxxx
Cc: linux-mm@xxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
---
 include/linux/mmzone.h | 2 ++
 mm/mmzone.c            | 1 +
 2 files changed, 3 insertions(+)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 07b784ffbb14..a13b8a602ee5 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -267,6 +267,8 @@ struct lruvec {
 	unsigned long			refaults;
 	/* Various lruvec state flags (enum lruvec_flags) */
 	unsigned long			flags;
+	/* per lruvec lru_lock for memcg */
+	spinlock_t			lru_lock;
 #ifdef CONFIG_MEMCG
 	struct pglist_data *pgdat;
 #endif
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 4686fdc23bb9..3750a90ed4a0 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -91,6 +91,7 @@ void lruvec_init(struct lruvec *lruvec)
 	enum lru_list lru;
 
 	memset(lruvec, 0, sizeof(struct lruvec));
+	spin_lock_init(&lruvec->lru_lock);
 
 	for_each_lru(lru)
 		INIT_LIST_HEAD(&lruvec->lists[lru]);
-- 
1.8.3.1
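
(Not part of the patch itself.) A minimal sketch of how a caller might take
the new lock once the later patches in the series switch the lru_lock users
over; the caller shown here is hypothetical and the actual conversion happens
in follow-up patches:

	/* hypothetical caller: protect LRU list manipulation per lruvec */
	struct lruvec *lruvec = mem_cgroup_page_lruvec(page, pgdat);

	spin_lock_irq(&lruvec->lru_lock);
	/* ... isolate or move pages on lruvec->lists[lru] ... */
	spin_unlock_irq(&lruvec->lru_lock);

With this, two tasks reclaiming from different memcgs on the same node would
contend on their own lruvec->lru_lock rather than the shared pgdat->lru_lock.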