Abstract out the logic that double-checks for overlaps in
ext4_mb_normalize_request() into a separate function. Since there have
been no reports so far of any overlap actually hitting this BUG_ON(),
in the future we can consider calling this function only under
"#ifdef AGGRESSIVE_CHECK".

There are no functional changes in this patch.

Signed-off-by: Ojaswin Mujoo <ojaswin@xxxxxxxxxxxxx>
Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@xxxxxxxxx>
Reviewed-by: Jan Kara <jack@xxxxxxx>
---
 fs/ext4/mballoc.c | 36 ++++++++++++++++++++++++------------
 1 file changed, 24 insertions(+), 12 deletions(-)

diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index f4e699bce99f..1628b008a096 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3984,6 +3984,29 @@ static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
         mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
 }
 
+static inline void
+ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac,
+                          ext4_lblk_t start, ext4_lblk_t end)
+{
+        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+        struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
+        struct ext4_prealloc_space *tmp_pa;
+        ext4_lblk_t tmp_pa_start, tmp_pa_end;
+
+        rcu_read_lock();
+        list_for_each_entry_rcu(tmp_pa, &ei->i_prealloc_list, pa_inode_list) {
+                spin_lock(&tmp_pa->pa_lock);
+                if (tmp_pa->pa_deleted == 0) {
+                        tmp_pa_start = tmp_pa->pa_lstart;
+                        tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
+
+                        BUG_ON(!(start >= tmp_pa_end || end <= tmp_pa_start));
+                }
+                spin_unlock(&tmp_pa->pa_lock);
+        }
+        rcu_read_unlock();
+}
+
 /*
  * Normalization means making request better in terms of
  * size and alignment
@@ -4140,18 +4163,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
         size = end - start;
 
         /* XXX: extra loop to check we really don't overlap preallocations */
-        rcu_read_lock();
-        list_for_each_entry_rcu(tmp_pa, &ei->i_prealloc_list, pa_inode_list) {
-                spin_lock(&tmp_pa->pa_lock);
-                if (tmp_pa->pa_deleted == 0) {
-                        tmp_pa_start = tmp_pa->pa_lstart;
-                        tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
-
-                        BUG_ON(!(start >= tmp_pa_end || end <= tmp_pa_start));
-                }
-                spin_unlock(&tmp_pa->pa_lock);
-        }
-        rcu_read_unlock();
+        ext4_mb_pa_assert_overlap(ac, start, end);
 
         /*
          * In this function "start" and "size" are normalized for better
-- 
2.31.1
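
For reference, if we do eventually compile this assertion out of
production builds as suggested above, one possible shape for the call
site is sketched below. This is only an illustration, assuming we reuse
the existing AGGRESSIVE_CHECK debug option that mballoc already uses
for its other consistency checks; the patch itself does not make this
change.

        /* XXX: extra loop to check we really don't overlap preallocations */
#ifdef AGGRESSIVE_CHECK
        /* Overlap assertion is compiled in only for debug builds */
        ext4_mb_pa_assert_overlap(ac, start, end);
#endif

Since AGGRESSIVE_CHECK is not enabled by default, the RCU walk over the
inode's preallocation list would then disappear entirely from normal
builds while still being available when debugging mballoc.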