The patch titled
     Subject: mm: multi-gen LRU: thrashing prevention
has been added to the -mm mm-unstable branch.  Its filename is
     mm-multi-gen-lru-thrashing-prevention.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-multi-gen-lru-thrashing-prevention.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Yu Zhao <yuzhao@xxxxxxxxxx>
Subject: mm: multi-gen LRU: thrashing prevention
Date: Mon, 15 Aug 2022 01:13:30 -0600

Add /sys/kernel/mm/lru_gen/min_ttl_ms for thrashing prevention, as
requested by many desktop users [1].

When set to value N, it prevents the working set of N milliseconds from
getting evicted.  The OOM killer is triggered if this working set cannot
be kept in memory.  Based on the average human detectable lag (~100ms),
N=1000 usually eliminates intolerable lags due to thrashing.  Larger
values like N=3000 make lags less noticeable at the risk of premature OOM
kills.

Compared with the size-based approach [2], this time-based approach has
the following advantages:

1. It is easier to configure because it is agnostic to applications
   and memory sizes.
2. It is more reliable because it is directly wired to the OOM killer.
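For illustration only (not part of this patch): a minimal userspace sketch
that sets min_ttl_ms to the suggested N=1000, assuming CONFIG_LRU_GEN is
enabled and the caller has write permission on the sysfs file.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/mm/lru_gen/min_ttl_ms", "w");

	if (!f) {
		perror("min_ttl_ms");
		return 1;
	}

	/* protect the working set of the last 1000 milliseconds */
	fprintf(f, "%u\n", 1000U);

	return fclose(f) ? 1 : 0;
}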
[1] https://lore.kernel.org/r/Ydza%2FzXKY9ATRoh6@xxxxxxxxxx/
[2] https://lore.kernel.org/r/20101028191523.GA14972@xxxxxxxxxx/

Link: https://lkml.kernel.org/r/20220815071332.627393-12-yuzhao@xxxxxxxxxx
Signed-off-by: Yu Zhao <yuzhao@xxxxxxxxxx>
Acked-by: Brian Geffon <bgeffon@xxxxxxxxxx>
Acked-by: Jan Alexander Steffens (heftig) <heftig@xxxxxxxxxxxxx>
Acked-by: Oleksandr Natalenko <oleksandr@xxxxxxxxxxxxxx>
Acked-by: Steven Barrett <steven@xxxxxxxxxxxx>
Acked-by: Suleiman Souhlal <suleiman@xxxxxxxxxx>
Tested-by: Daniel Byrne <djbyrne@xxxxxxx>
Tested-by: Donald Carr <d@xxxxxxxxxxxxxxx>
Tested-by: Holger Hoffstätte <holger@xxxxxxxxxxxxxxxxxxxxxx>
Tested-by: Konstantin Kharlamov <Hi-Angel@xxxxxxxxx>
Tested-by: Shuang Zhai <szhai2@xxxxxxxxxxxxxxxx>
Tested-by: Sofia Trinh <sofia.trinh@edi.works>
Tested-by: Vaibhav Jain <vaibhav@xxxxxxxxxxxxx>
Cc: Andi Kleen <ak@xxxxxxxxxxxxxxx>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@xxxxxxxxxxxxx>
Cc: Barry Song <baohua@xxxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: Hillf Danton <hdanton@xxxxxxxx>
Cc: Jens Axboe <axboe@xxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Jonathan Corbet <corbet@xxxxxxx>
Cc: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxx>
Cc: Miaohe Lin <linmiaohe@xxxxxxxxxx>
Cc: Michael Larabel <Michael@xxxxxxxxxxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxxxx>
Cc: Mike Rapoport <rppt@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Qi Zheng <zhengqi.arch@xxxxxxxxxxxxx>
Cc: Tejun Heo <tj@xxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/mmzone.h |    2 +
 mm/vmscan.c            |   75 ++++++++++++++++++++++++++++++++++++---
 2 files changed, 73 insertions(+), 4 deletions(-)

--- a/include/linux/mmzone.h~mm-multi-gen-lru-thrashing-prevention
+++ a/include/linux/mmzone.h
@@ -422,6 +422,8 @@ struct lru_gen_struct {
 	unsigned long max_seq;
 	/* the eviction increments the oldest generation numbers */
 	unsigned long min_seq[ANON_AND_FILE];
+	/* the birth time of each generation in jiffies */
+	unsigned long timestamps[MAX_NR_GENS];
 	/* the multi-gen LRU lists, lazily sorted on eviction */
 	struct list_head lists[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
 	/* the multi-gen LRU sizes, eventually consistent */
--- a/mm/vmscan.c~mm-multi-gen-lru-thrashing-prevention
+++ a/mm/vmscan.c
@@ -4298,6 +4298,7 @@ static void inc_max_seq(struct lruvec *l
 	for (type = 0; type < ANON_AND_FILE; type++)
 		reset_ctrl_pos(lruvec, type, false);
 
+	WRITE_ONCE(lrugen->timestamps[next], jiffies);
 	/* make sure preceding modifications appear */
 	smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1);
 
@@ -4424,7 +4425,7 @@ static unsigned long get_nr_evictable(st
 	return total;
 }
 
-static void age_lruvec(struct lruvec *lruvec, struct scan_control *sc)
+static bool age_lruvec(struct lruvec *lruvec, struct scan_control *sc, unsigned long min_ttl)
 {
 	bool need_aging;
 	unsigned long nr_to_scan;
@@ -4438,21 +4439,40 @@ static void age_lruvec(struct lr
 	mem_cgroup_calculate_protection(NULL, memcg);
 
 	if (mem_cgroup_below_min(memcg))
-		return;
+		return false;
 
 	nr_to_scan = get_nr_evictable(lruvec, max_seq, min_seq, swappiness, &need_aging);
 	if (!nr_to_scan)
-		return;
+		return false;
 
 	nr_to_scan >>= mem_cgroup_online(memcg) ? sc->priority : 0;
 
+	if (min_ttl) {
+		int gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]);
+		unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
+
+		if (time_is_after_jiffies(birth + min_ttl))
+			return false;
+
+		/* the size is likely too small to be helpful */
+		if (!nr_to_scan && sc->priority != DEF_PRIORITY)
+			return false;
+	}
+
 	if (nr_to_scan && need_aging)
 		try_to_inc_max_seq(lruvec, max_seq, sc, swappiness);
+
+	return true;
 }
 
+/* to protect the working set of the last N jiffies */
+static unsigned long lru_gen_min_ttl __read_mostly;
+
 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
 {
 	struct mem_cgroup *memcg;
+	bool success = false;
+	unsigned long min_ttl = READ_ONCE(lru_gen_min_ttl);
 
 	VM_WARN_ON_ONCE(!current_is_kswapd());
 
@@ -4478,12 +4498,32 @@ static void lru_gen_age_node(struct pgli
 	do {
 		struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
 
-		age_lruvec(lruvec, sc);
+		if (age_lruvec(lruvec, sc, min_ttl))
+			success = true;
 
 		cond_resched();
 	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
 
 	clear_mm_walk();
+
+	/* check the order to exclude compaction-induced reclaim */
+	if (success || !min_ttl || sc->order)
+		return;
+
+	/*
+	 * The main goal is to OOM kill if every generation from all memcgs is
+	 * younger than min_ttl. However, another possibility is all memcgs are
+	 * either below min or empty.
+	 */
+	if (mutex_trylock(&oom_lock)) {
+		struct oom_control oc = {
+			.gfp_mask = sc->gfp_mask,
+		};
+
+		out_of_memory(&oc);
+
+		mutex_unlock(&oom_lock);
+	}
 }
 
 /*
@@ -5210,6 +5250,28 @@ unlock:
  *                          sysfs interface
  ******************************************************************************/
 
+static ssize_t show_min_ttl(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%u\n", jiffies_to_msecs(READ_ONCE(lru_gen_min_ttl)));
+}
+
+static ssize_t store_min_ttl(struct kobject *kobj, struct kobj_attribute *attr,
+			     const char *buf, size_t len)
+{
+	unsigned int msecs;
+
+	if (kstrtouint(buf, 0, &msecs))
+		return -EINVAL;
+
+	WRITE_ONCE(lru_gen_min_ttl, msecs_to_jiffies(msecs));
+
+	return len;
+}
+
+static struct kobj_attribute lru_gen_min_ttl_attr = __ATTR(
+	min_ttl_ms, 0644, show_min_ttl, store_min_ttl
+);
+
 static ssize_t show_enabled(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 {
 	unsigned int caps = 0;
@@ -5258,6 +5320,7 @@ static struct kobj_attribute lru_gen_ena
 );
 
 static struct attribute *lru_gen_attrs[] = {
+	&lru_gen_min_ttl_attr.attr,
 	&lru_gen_enabled_attr.attr,
 	NULL
 };
@@ -5273,12 +5336,16 @@ static struct attribute_group lru_gen_at
 
 void lru_gen_init_lruvec(struct lruvec *lruvec)
 {
+	int i;
 	int gen, type, zone;
 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
 
 	lrugen->max_seq = MIN_NR_GENS + 1;
 	lrugen->enabled = lru_gen_enabled();
 
+	for (i = 0; i <= MIN_NR_GENS + 1; i++)
+		lrugen->timestamps[i] = jiffies;
+
 	for_each_gen_type_zone(gen, type, zone)
 		INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]);
 
_

Patches currently in -mm which might be from yuzhao@xxxxxxxxxx are

mm-x86-arm64-add-arch_has_hw_pte_young.patch
mm-x86-add-config_arch_has_nonleaf_pmd_young.patch
mm-vmscanc-refactor-shrink_node.patch
revert-include-linux-mm_inlineh-fold-__update_lru_size-into-its-sole-caller.patch
mm-multi-gen-lru-groundwork.patch
mm-multi-gen-lru-minimal-implementation.patch
mm-multi-gen-lru-exploit-locality-in-rmap.patch
mm-multi-gen-lru-support-page-table-walks.patch
mm-multi-gen-lru-optimize-multiple-memcgs.patch
mm-multi-gen-lru-kill-switch.patch
mm-multi-gen-lru-thrashing-prevention.patch
mm-multi-gen-lru-debugfs-interface.patch
mm-multi-gen-lru-admin-guide.patch
mm-multi-gen-lru-design-doc.patch