This patch adds a dedicated shrinker to free unneeded memory consumed by
a number of erofs in-memory data structures.

Like F2FS and UBIFS, it also adds:
 - sbi->umount_mutex to avoid races between the shrinker and put_super;
 - sbi->shrinker_run_no to avoid revisiting recently scanned objects.

Signed-off-by: Gao Xiang <gaoxiang25@xxxxxxxxxx>
---
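The call sites of the per-sb hooks are not included in this patch; the
sketch below only illustrates, assuming the usual fill_super/put_super
pair (erofs_fill_super() and erofs_put_super() are hypothetical names
here), how erofs_shrinker_register() and erofs_shrinker_unregister()
are expected to be wired up so that a concurrent erofs_shrink_scan(),
which grabs sbi->umount_mutex via mutex_trylock(), cannot race with
umount:

	static int erofs_fill_super(struct super_block *sb, void *data, int silent)
	{
		/* ... read the on-disk superblock, set up sb->s_fs_info ... */

		/* make this superblock visible to the global shrinker */
		erofs_shrinker_register(sb);
		return 0;
	}

	static void erofs_put_super(struct super_block *sb)
	{
		/*
		 * erofs_shrinker_unregister() takes sbi->umount_mutex, so any
		 * shrink pass that already holds it finishes before this
		 * superblock is removed from erofs_sb_list and torn down.
		 */
		erofs_shrinker_unregister(sb);
	}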
 fs/erofs/internal.h |  7 ++++
 fs/erofs/super.c    |  6 +++
 fs/erofs/utils.c    | 94 ++++++++++++++++++++++++++++++++++++++++++++-
 3 files changed, 106 insertions(+), 1 deletion(-)

diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 942e3cbe6076..7d4939463518 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -64,6 +64,9 @@ struct erofs_sb_info {
 #ifdef CONFIG_EROFS_FS_ZIP
 	/* list for all registered superblocks, mainly for shrinker */
 	struct list_head list;
+	struct mutex umount_mutex;
+
+	unsigned int shrinker_run_no;
 #endif
 	u32 blocks;
 	u32 meta_blkaddr;
@@ -412,9 +415,13 @@ extern const struct file_operations erofs_dir_fops;
 #ifdef CONFIG_EROFS_FS_ZIP
 void erofs_shrinker_register(struct super_block *sb);
 void erofs_shrinker_unregister(struct super_block *sb);
+int __init erofs_init_shrinker(void);
+void erofs_exit_shrinker(void);
 #else
 static inline void erofs_shrinker_register(struct super_block *sb) {}
 static inline void erofs_shrinker_unregister(struct super_block *sb) {}
+static inline int erofs_init_shrinker(void) { return 0; }
+static inline void erofs_exit_shrinker(void) {}
 #endif
 
 #endif
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index c8bd77e3c355..6d0a383527b6 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -415,6 +415,9 @@ static int __init erofs_module_init(void)
 	if (err)
 		goto icache_err;
 
+	err = erofs_init_shrinker();
+	if (err)
+		goto shrinker_err;
 	err = register_filesystem(&erofs_fs_type);
 	if (err)
 		goto fs_err;
@@ -423,6 +426,8 @@ static int __init erofs_module_init(void)
 	return 0;
 
 fs_err:
+	erofs_exit_shrinker();
+shrinker_err:
 	erofs_exit_inode_cache();
 icache_err:
 	return err;
@@ -431,6 +436,7 @@ static int __init erofs_module_init(void)
 static void __exit erofs_module_exit(void)
 {
 	unregister_filesystem(&erofs_fs_type);
+	erofs_exit_shrinker();
 	erofs_exit_inode_cache();
 	infoln("successfully finalize erofs");
 }
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
index 08c8752895bc..727d3831b5c9 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/utils.c
@@ -9,6 +9,12 @@
 #include "internal.h"
 
 #ifdef CONFIG_EROFS_FS_ZIP
+/* global shrink count (for all mounted EROFS instances) */
+static atomic_long_t erofs_global_shrink_cnt;
+
+/* protected by 'erofs_sb_list_lock' */
+static unsigned int shrinker_run_no;
+
 /* protects the mounted 'erofs_sb_list' */
 static DEFINE_SPINLOCK(erofs_sb_list_lock);
 static LIST_HEAD(erofs_sb_list);
@@ -17,6 +23,8 @@ void erofs_shrinker_register(struct super_block *sb)
 {
 	struct erofs_sb_info *sbi = EROFS_SB(sb);
 
+	mutex_init(&sbi->umount_mutex);
+
 	spin_lock(&erofs_sb_list_lock);
 	list_add(&sbi->list, &erofs_sb_list);
 	spin_unlock(&erofs_sb_list_lock);
@@ -24,9 +32,93 @@ void erofs_shrinker_register(struct super_block *sb)
 
 void erofs_shrinker_unregister(struct super_block *sb)
 {
+	struct erofs_sb_info *const sbi = EROFS_SB(sb);
+
+	mutex_lock(&sbi->umount_mutex);
+	/* will add shrink final handler here */
+
 	spin_lock(&erofs_sb_list_lock);
-	list_del(&EROFS_SB(sb)->list);
+	list_del(&sbi->list);
 	spin_unlock(&erofs_sb_list_lock);
+	mutex_unlock(&sbi->umount_mutex);
+}
+
+static unsigned long erofs_shrink_count(struct shrinker *shrink,
+					struct shrink_control *sc)
+{
+	return atomic_long_read(&erofs_global_shrink_cnt);
+}
+
+static unsigned long erofs_shrink_scan(struct shrinker *shrink,
+				       struct shrink_control *sc)
+{
+	struct erofs_sb_info *sbi;
+	struct list_head *p;
+
+	unsigned long nr = sc->nr_to_scan;
+	unsigned int run_no;
+	unsigned long freed = 0;
+
+	spin_lock(&erofs_sb_list_lock);
+	do {
+		run_no = ++shrinker_run_no;
+	} while (run_no == 0);
+
+	/* Iterate over all mounted superblocks and try to shrink them */
+	p = erofs_sb_list.next;
+	while (p != &erofs_sb_list) {
+		sbi = list_entry(p, struct erofs_sb_info, list);
+
+		/*
+		 * We move the ones we do to the end of the list, so we stop
+		 * when we see one we have already done.
+		 */
+		if (sbi->shrinker_run_no == run_no)
+			break;
+
+		if (!mutex_trylock(&sbi->umount_mutex)) {
+			p = p->next;
+			continue;
+		}
+
+		spin_unlock(&erofs_sb_list_lock);
+		sbi->shrinker_run_no = run_no;
+
+		/* will add shrink handler here */
+
+		spin_lock(&erofs_sb_list_lock);
+		/* Get the next list element before we move this one */
+		p = p->next;
+
+		/*
+		 * Move this one to the end of the list to provide some
+		 * fairness.
+		 */
+		list_move_tail(&sbi->list, &erofs_sb_list);
+		mutex_unlock(&sbi->umount_mutex);
+
+		if (freed >= nr)
+			break;
+	}
+	spin_unlock(&erofs_sb_list_lock);
+	return freed;
+}
+
+static struct shrinker erofs_shrinker_info = {
+	.scan_objects = erofs_shrink_scan,
+	.count_objects = erofs_shrink_count,
+	.seeks = DEFAULT_SEEKS,
+};
+
+int __init erofs_init_shrinker(void)
+{
+	return register_shrinker(&erofs_shrinker_info);
+}
+
+void erofs_exit_shrinker(void)
+{
+	unregister_shrinker(&erofs_shrinker_info);
+}
+
 #endif
-- 
2.17.1