The patch titled Subject: mm: list_lru: add lock_irq member to __list_lru_init() has been removed from the -mm tree. Its filename was mm-list_lru-add-lock_irq-member-to-__list_lru_init.patch This patch was dropped because it is obsolete ------------------------------------------------------ From: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx> Subject: mm: list_lru: add lock_irq member to __list_lru_init() scan_shadow_nodes() is the only user of __list_lru_walk_one() which disables interrupts before invoking it. The reason is that nlru->lock nests inside the IRQ-safe i_pages lock. Some functions unconditionally acquire the lock with the _irq() suffix. __list_lru_walk_one() can't acquire the lock unconditionally with the _irq() suffix because it might invoke a callback that unlocks the nlru->lock and invokes a sleeping function without enabling interrupts. Add an argument to __list_lru_init() that indicates whether nlru->lock needs to be acquired with interrupts disabled or not. 
Link: http://lkml.kernel.org/r/20180622151221.28167-4-bigeasy@xxxxxxxxxxxxx Signed-off-by: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx> Reviewed-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx> Cc: Vladimir Davydov <vdavydov.dev@xxxxxxxxx> Cc: Kirill Tkhai <ktkhai@xxxxxxxxxxxxx> Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx> Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx> --- include/linux/list_lru.h | 12 ++++++++---- mm/list_lru.c | 14 ++++++++++---- mm/workingset.c | 12 ++++-------- 3 files changed, 22 insertions(+), 16 deletions(-) diff -puN include/linux/list_lru.h~mm-list_lru-add-lock_irq-member-to-__list_lru_init include/linux/list_lru.h --- a/include/linux/list_lru.h~mm-list_lru-add-lock_irq-member-to-__list_lru_init +++ a/include/linux/list_lru.h @@ -51,18 +51,22 @@ struct list_lru_node { struct list_lru { struct list_lru_node *node; + bool lock_irq; #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) struct list_head list; #endif }; void list_lru_destroy(struct list_lru *lru); -int __list_lru_init(struct list_lru *lru, bool memcg_aware, +int __list_lru_init(struct list_lru *lru, bool memcg_aware, bool lock_irq, struct lock_class_key *key); -#define list_lru_init(lru) __list_lru_init((lru), false, NULL) -#define list_lru_init_key(lru, key) __list_lru_init((lru), false, (key)) -#define list_lru_init_memcg(lru) __list_lru_init((lru), true, NULL) +#define list_lru_init(lru) __list_lru_init((lru), false, false, \ + NULL) +#define list_lru_init_key(lru, key) __list_lru_init((lru), false, false, \ + (key)) +#define list_lru_init_memcg(lru) __list_lru_init((lru), true, false, \ + NULL) int memcg_update_all_list_lrus(int num_memcgs); void memcg_drain_all_list_lrus(int src_idx, int dst_idx); diff -puN mm/list_lru.c~mm-list_lru-add-lock_irq-member-to-__list_lru_init mm/list_lru.c --- a/mm/list_lru.c~mm-list_lru-add-lock_irq-member-to-__list_lru_init +++ a/mm/list_lru.c @@ -204,7 +204,10 @@ __list_lru_walk_one(struct list_lru *lru struct list_head *item, *n; 
unsigned long isolated = 0; - spin_lock(&nlru->lock); + if (lru->lock_irq) + spin_lock_irq(&nlru->lock); + else + spin_lock(&nlru->lock); l = list_lru_from_memcg_idx(nlru, memcg_idx); restart: list_for_each_safe(item, n, &l->list) { @@ -251,7 +254,10 @@ restart: } } - spin_unlock(&nlru->lock); + if (lru->lock_irq) + spin_unlock_irq(&nlru->lock); + else + spin_unlock(&nlru->lock); return isolated; } @@ -553,7 +559,7 @@ static void memcg_destroy_list_lru(struc } #endif /* CONFIG_MEMCG && !CONFIG_SLOB */ -int __list_lru_init(struct list_lru *lru, bool memcg_aware, +int __list_lru_init(struct list_lru *lru, bool memcg_aware, bool lock_irq, struct lock_class_key *key) { int i; @@ -580,7 +586,7 @@ int __list_lru_init(struct list_lru *lru lru->node = NULL; goto out; } - + lru->lock_irq = lock_irq; list_lru_register(lru); out: memcg_put_cache_ids(); diff -puN mm/workingset.c~mm-list_lru-add-lock_irq-member-to-__list_lru_init mm/workingset.c --- a/mm/workingset.c~mm-list_lru-add-lock_irq-member-to-__list_lru_init +++ a/mm/workingset.c @@ -480,13 +480,8 @@ out: static unsigned long scan_shadow_nodes(struct shrinker *shrinker, struct shrink_control *sc) { - unsigned long ret; - - /* list_lru lock nests inside the IRQ-safe i_pages lock */ - local_irq_disable(); - ret = list_lru_shrink_walk(&shadow_nodes, sc, shadow_lru_isolate, NULL); - local_irq_enable(); - return ret; + return list_lru_shrink_walk(&shadow_nodes, sc, shadow_lru_isolate, + NULL); } static struct shrinker workingset_shadow_shrinker = { @@ -523,7 +518,8 @@ static int __init workingset_init(void) pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n", timestamp_bits, max_order, bucket_order); - ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key); + /* list_lru lock nests inside the IRQ-safe i_pages lock */ + ret = __list_lru_init(&shadow_nodes, true, true, &shadow_nodes_key); if (ret) goto err; ret = register_shrinker(&workingset_shadow_shrinker); _ Patches currently in -mm which might 
be from bigeasy@xxxxxxxxxxxxx are ntfs-dont-disable-interrupts-during-kmap_atomic.patch mm-workingset-remove-local_irq_disable-from-count_shadow_nodes.patch mm-workingset-make-shadow_lru_isolate-use-locking-suffix.patch -- To unsubscribe from this list: send the line "unsubscribe mm-commits" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html