This is a note to let you know that I've just added the patch titled

    f2fs: do FG_GC when GC boosting is required for zoned devices

to the 6.11-stable tree which can be found at:

    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     f2fs-do-fg_gc-when-gc-boosting-is-required-for-zoned.patch
and it can be found in the queue-6.11 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.



commit 0605fad05757d633c99c50abae8bc784fda87094
Author: Daeho Jeong <daehojeong@xxxxxxxxxx>
Date:   Mon Sep 9 15:19:44 2024 -0700

    f2fs: do FG_GC when GC boosting is required for zoned devices

    [ Upstream commit 9748c2ddea4a3f46a498bff4cf2bf9a5629e3f8b ]

    Under low free section count, we need to use FG_GC instead of BG_GC
    to recover free sections.

    Signed-off-by: Daeho Jeong <daehojeong@xxxxxxxxxx>
    Reviewed-by: Chao Yu <chao@xxxxxxxxxx>
    Signed-off-by: Jaegeuk Kim <jaegeuk@xxxxxxxxxx>
    Stable-dep-of: 5cc69a27abfa ("f2fs: forcibly migrate to secure space for zoned device file pinning")
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index bfd97498cd3da..3bdfa53b24bb9 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1301,6 +1301,7 @@ struct f2fs_gc_control {
 	bool no_bg_gc;			/* check the space and stop bg_gc */
 	bool should_migrate_blocks;	/* should migrate blocks */
 	bool err_gc_skipped;		/* return EAGAIN if GC skipped */
+	bool one_time;			/* require one time GC in one migration unit */
 	unsigned int nr_free_secs;	/* # of free sections to do GC */
 };
 
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 9a3d3994cf2ba..a59fec64eccfb 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -81,6 +81,8 @@ static int gc_thread_func(void *data)
 			continue;
 		}
 
+		gc_control.one_time = false;
+
 		/*
 		 * [GC triggering condition]
 		 * 0. GC is not conducted currently.
@@ -126,15 +128,19 @@ static int gc_thread_func(void *data)
 			wait_ms = gc_th->max_sleep_time;
 		}
 
-		if (need_to_boost_gc(sbi))
+		if (need_to_boost_gc(sbi)) {
 			decrease_sleep_time(gc_th, &wait_ms);
-		else
+			if (f2fs_sb_has_blkzoned(sbi))
+				gc_control.one_time = true;
+		} else {
 			increase_sleep_time(gc_th, &wait_ms);
+		}
 do_gc:
 		stat_inc_gc_call_count(sbi, foreground ?
 					FOREGROUND : BACKGROUND);
 
-		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;
+		sync_mode = (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC) ||
+				gc_control.one_time;
 
 		/* foreground GC was been triggered via f2fs_balance_fs() */
 		if (foreground)
@@ -1701,7 +1707,7 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
 static int do_garbage_collect(struct f2fs_sb_info *sbi,
 				unsigned int start_segno,
 				struct gc_inode_list *gc_list, int gc_type,
-				bool force_migrate)
+				bool force_migrate, bool one_time)
 {
 	struct page *sum_page;
 	struct f2fs_summary_block *sum;
@@ -1728,7 +1734,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
 		sec_end_segno -= SEGS_PER_SEC(sbi) -
 					f2fs_usable_segs_in_sec(sbi, segno);
 
-		if (gc_type == BG_GC) {
+		if (gc_type == BG_GC || one_time) {
 			unsigned int window_granularity =
 				sbi->migration_window_granularity;
 
@@ -1911,7 +1917,8 @@ int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
 	}
 
 	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
-				gc_control->should_migrate_blocks);
+				gc_control->should_migrate_blocks,
+				gc_control->one_time);
 	if (seg_freed < 0)
 		goto stop;
 
@@ -1922,6 +1929,9 @@ int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
 			total_sec_freed++;
 	}
 
+	if (gc_control->one_time)
+		goto stop;
+
 	if (gc_type == FG_GC) {
 		sbi->cur_victim_sec = NULL_SEGNO;
 
@@ -2047,7 +2057,7 @@ int f2fs_gc_range(struct f2fs_sb_info *sbi,
 		};
 
 		do_garbage_collect(sbi, segno, &gc_list, FG_GC,
-						dry_run_sections == 0);
+						dry_run_sections == 0, false);
 		put_gc_inode(&gc_list);
 
 		if (!dry_run && get_valid_blocks(sbi, segno, true))
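
Note for reviewers: the hunks above wire a new one_time flag through the
background GC thread so that, on zoned devices under free-section pressure,
a single synchronous (FG_GC-style) pass over one migration window is run
instead of the usual lazy BG_GC.  The standalone C sketch below only models
that decision flow; the names gc_policy, pick_gc_mode and
FREE_SEC_BOOST_WATERMARK are hypothetical stand-ins for f2fs_gc_control,
need_to_boost_gc() and f2fs_sb_has_blkzoned(), and the threshold value is
made up for illustration.

/*
 * Illustrative sketch only, not part of the patch.  Models how a low free
 * section count on a zoned device flips GC from background to foreground.
 */
#include <stdbool.h>
#include <stdio.h>

enum gc_mode { BG_GC_MODE, FG_GC_MODE };

struct gc_policy {
	bool one_time;		/* do one migration-window pass, then stop */
	bool sync_mode;		/* wait for the GC pass to complete */
	enum gc_mode mode;	/* FG_GC vs BG_GC */
};

/* Hypothetical low-free-section watermark; the real threshold differs. */
#define FREE_SEC_BOOST_WATERMARK	16

static struct gc_policy pick_gc_mode(unsigned int free_secs, bool blkzoned)
{
	struct gc_policy p = { .one_time = false, .sync_mode = false,
			       .mode = BG_GC_MODE };
	bool need_boost = free_secs < FREE_SEC_BOOST_WATERMARK;

	/* Mirrors the patched gc_thread_func(): boosting on a zoned device
	 * sets one_time, which in turn forces a synchronous pass.  (The real
	 * code also ORs in the BGGC_MODE_SYNC mount option.) */
	if (need_boost && blkzoned)
		p.one_time = true;

	p.sync_mode = p.one_time;
	p.mode = p.sync_mode ? FG_GC_MODE : BG_GC_MODE;
	return p;
}

int main(void)
{
	struct gc_policy p = pick_gc_mode(4, true);

	printf("one_time=%d sync=%d mode=%s\n",
	       p.one_time, p.sync_mode,
	       p.mode == FG_GC_MODE ? "FG_GC" : "BG_GC");
	return 0;
}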