Currently, if EXT4_MB_GRP_NEED_INIT(grp) is true, we first check a few things like grp->bb_free etc. with the spinlock (ext4_lock_group) held. We then drop the lock only to initialize the group's buddy cache, and then take the lock again and check ext4_mb_good_group(). Once this step is done we return to ext4_mb_regular_allocator(), load the buddy, and then take the lock again only to check ext4_mb_good_group(), which was already done in the previous step. I believe we can optimize away one step here: if the group needs init, we can check only a few things and return early, then recheck ext4_mb_good_group() just once after loading the buddy cache. Signed-off-by: Ritesh Harjani <riteshh@xxxxxxxxxxxxx> --- fs/ext4/mballoc.c | 44 +++++++++++++++++++++----------------------- 1 file changed, 21 insertions(+), 23 deletions(-) diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index dcd05ff7c012..7d766dc34cdd 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -2134,33 +2134,34 @@ static bool ext4_mb_good_group(struct ext4_allocation_context *ac, * during ext4_mb_init_group(). This should not be called with * ext4_lock_group() held. 
*/ -static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac, +static bool ext4_mb_good_group_nolock(struct ext4_allocation_context *ac, ext4_group_t group, int cr) { struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); struct super_block *sb = ac->ac_sb; ext4_grpblk_t free; - int ret = 0; + bool ret = false, need_init = EXT4_MB_GRP_NEED_INIT(grp); ext4_lock_group(sb, group); - free = grp->bb_free; - if (free == 0) - goto out; - if (cr <= 2 && free < ac->ac_g_ex.fe_len) - goto out; - if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) - goto out; - ext4_unlock_group(sb, group); - - /* We only do this if the grp has never been initialized */ - if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { - ret = ext4_mb_init_group(ac->ac_sb, group, GFP_NOFS); - if (ret) - return ret; + /* + * If the group needs init then no need to call ext4_mb_init_group() + * after dropping the lock. It's better we check bb_free/other things + * here and if it meets the criteria than return true. Later we + * will anyway check for good group after loading the buddy cache + * which, if required will call ext4_mb_init_group() from within. + */ + if (unlikely(need_init)) { + free = grp->bb_free; + if (free == 0) + goto out; + if (cr <= 2 && free < ac->ac_g_ex.fe_len) + goto out; + if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) + goto out; + ret = true; + } else { + ret = ext4_mb_good_group(ac, group, cr); } - - ext4_lock_group(sb, group); - ret = ext4_mb_good_group(ac, group, cr); out: ext4_unlock_group(sb, group); return ret; @@ -2252,11 +2253,8 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac) /* This now checks without needing the buddy page */ ret = ext4_mb_good_group_nolock(ac, group, cr); - if (ret <= 0) { - if (!first_err) - first_err = ret; + if (ret == 0) continue; - } err = ext4_mb_load_buddy(sb, group, &e4b); if (err) -- 2.21.0