Split the code that inserts and merges ext4_free_data structures into a new interface, ext4_insert_free_data. This is in preparation for the upcoming async background discard. Signed-off-by: Wang Jianchao <wangjianchao@xxxxxxxxxxxx> --- fs/ext4/mballoc.c | 96 +++++++++++++++++++++++++++++-------------------------- 1 file changed, 51 insertions(+), 45 deletions(-) diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 85418cf..16f06d2 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -350,6 +350,12 @@ static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, ext4_group_t group); static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac); +static inline struct ext4_free_data *efd_entry(struct rb_node *n) +{ + return rb_entry_safe(n, struct ext4_free_data, efd_node); +} +static int ext4_insert_free_data(struct ext4_sb_info *sbi, + struct rb_root *root, struct ext4_free_data *nfd); /* * The algorithm using this percpu seq counter goes below: @@ -5069,28 +5075,53 @@ static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi, kmem_cache_free(ext4_free_data_cachep, entry); } +static int ext4_insert_free_data(struct ext4_sb_info *sbi, + struct rb_root *root, struct ext4_free_data *nfd) +{ + struct rb_node **n = &root->rb_node; + struct rb_node *p = NULL; + struct ext4_free_data *fd; + + while (*n) { + p = *n; + fd = rb_entry(p, struct ext4_free_data, efd_node); + if (nfd->efd_start_cluster < fd->efd_start_cluster) + n = &(*n)->rb_left; + else if (nfd->efd_start_cluster >= + (fd->efd_start_cluster + fd->efd_count)) + n = &(*n)->rb_right; + else + return -EINVAL; + } + + rb_link_node(&nfd->efd_node, p, n); + rb_insert_color(&nfd->efd_node, root); + + /* Now try to see the extent can be merged to left and right */ + fd = efd_entry(rb_prev(&nfd->efd_node)); + if (fd) + ext4_try_merge_freed_extent(sbi, fd, nfd, root); + + fd = 
efd_entry(rb_next(&nfd->efd_node)); + if (fd) + ext4_try_merge_freed_extent(sbi, fd, nfd, root); + + return 0; +} + static noinline_for_stack int ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, - struct ext4_free_data *new_entry) + struct ext4_free_data *nfd) { - ext4_group_t group = e4b->bd_group; - ext4_grpblk_t cluster; - ext4_grpblk_t clusters = new_entry->efd_count; - struct ext4_free_data *entry; struct ext4_group_info *db = e4b->bd_info; struct super_block *sb = e4b->bd_sb; struct ext4_sb_info *sbi = EXT4_SB(sb); - struct rb_node **n = &db->bb_free_root.rb_node, *node; - struct rb_node *parent = NULL, *new_node; BUG_ON(!ext4_handle_valid(handle)); BUG_ON(e4b->bd_bitmap_page == NULL); BUG_ON(e4b->bd_buddy_page == NULL); - new_node = &new_entry->efd_node; - cluster = new_entry->efd_start_cluster; - - if (!*n) { + if (!db->bb_free_root.rb_node) { /* first free block exent. We need to protect buddy cache from being freed, * otherwise we'll refresh it from @@ -5099,44 +5130,19 @@ static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi, get_page(e4b->bd_buddy_page); get_page(e4b->bd_bitmap_page); } - while (*n) { - parent = *n; - entry = rb_entry(parent, struct ext4_free_data, efd_node); - if (cluster < entry->efd_start_cluster) - n = &(*n)->rb_left; - else if (cluster >= (entry->efd_start_cluster + entry->efd_count)) - n = &(*n)->rb_right; - else { - ext4_grp_locked_error(sb, group, 0, - ext4_group_first_block_no(sb, group) + - EXT4_C2B(sbi, cluster), - "Block already on to-be-freed list"); - kmem_cache_free(ext4_free_data_cachep, new_entry); - return 0; - } - } - - rb_link_node(new_node, parent, n); - rb_insert_color(new_node, &db->bb_free_root); - - /* Now try to see the extent can be merged to left and right */ - node = rb_prev(new_node); - if (node) { - entry = rb_entry(node, struct ext4_free_data, efd_node); - ext4_try_merge_freed_extent(sbi, entry, new_entry, - &(db->bb_free_root)); - } - node = rb_next(new_node); - if (node) { - 
entry = rb_entry(node, struct ext4_free_data, efd_node); - ext4_try_merge_freed_extent(sbi, entry, new_entry, - &(db->bb_free_root)); + if (ext4_insert_free_data(sbi, &db->bb_free_root, nfd)) { + ext4_grp_locked_error(sb, e4b->bd_group, 0, + ext4_group_first_block_no(sb, e4b->bd_group) + + EXT4_C2B(sbi, nfd->efd_start_cluster), + "Block already on to-be-freed list"); + kmem_cache_free(ext4_free_data_cachep, nfd); + return 0; } spin_lock(&sbi->s_md_lock); - list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list); - sbi->s_mb_free_pending += clusters; + list_add_tail(&nfd->efd_list, &sbi->s_freed_data_list); + sbi->s_mb_free_pending += nfd->efd_count; spin_unlock(&sbi->s_md_lock); return 0; } -- 1.8.3.1