From: Zhang Yi <yi.zhang@xxxxxxxxxx> Prepare to reserve metadata blocks for delayed allocation in ext4_da_reserve_space(), claim reserved space from the global sbi->s_dirtyclusters_counter, and also update the tracepoints to show the new reserved metadata blocks. This patch is just a preparation; the reserved ext_len is always zero. Signed-off-by: Zhang Yi <yi.zhang@xxxxxxxxxx> --- fs/ext4/inode.c | 28 ++++++++++++++++------------ include/trace/events/ext4.h | 10 ++++++++-- 2 files changed, 24 insertions(+), 14 deletions(-) diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index dda17b3340ce..a189009d20fa 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1439,31 +1439,37 @@ static int ext4_journalled_write_end(struct file *file, } /* - * Reserve space for a single cluster + * Reserve space for 'rsv_dlen' data blocks/clusters and 'rsv_extlen' + * extent metadata blocks. */ -static int ext4_da_reserve_space(struct inode *inode) +static int ext4_da_reserve_space(struct inode *inode, unsigned int rsv_dlen, + unsigned int rsv_extlen) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_inode_info *ei = EXT4_I(inode); int ret; + if (!rsv_dlen && !rsv_extlen) + return 0; + /* * We will charge metadata quota at writeout time; this saves * us from metadata over-estimation, though we may go over by * a small amount in the end. Here we just reserve for data. 
*/ - ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1)); + ret = dquot_reserve_block(inode, EXT4_C2B(sbi, rsv_dlen)); if (ret) return ret; spin_lock(&ei->i_block_reservation_lock); - if (ext4_claim_free_clusters(sbi, 1, 0)) { + if (ext4_claim_free_clusters(sbi, rsv_dlen + rsv_extlen, 0)) { spin_unlock(&ei->i_block_reservation_lock); - dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); + dquot_release_reservation_block(inode, EXT4_C2B(sbi, rsv_dlen)); return -ENOSPC; } - ei->i_reserved_data_blocks++; - trace_ext4_da_reserve_space(inode); + ei->i_reserved_data_blocks += rsv_dlen; + ei->i_reserved_ext_blocks += rsv_extlen; + trace_ext4_da_reserve_space(inode, rsv_dlen, rsv_extlen); spin_unlock(&ei->i_block_reservation_lock); return 0; /* success */ @@ -1659,11 +1665,9 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk) } } - if (rsv_dlen > 0) { - ret = ext4_da_reserve_space(inode); - if (ret) /* ENOSPC */ - return ret; - } + ret = ext4_da_reserve_space(inode, rsv_dlen, 0); + if (ret) /* ENOSPC */ + return ret; ext4_es_insert_delayed_block(inode, lblk, allocated); return 0; diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index 115f96f444ff..7a9839f2d681 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -1251,14 +1251,16 @@ TRACE_EVENT(ext4_da_update_reserve_space, ); TRACE_EVENT(ext4_da_reserve_space, - TP_PROTO(struct inode *inode), + TP_PROTO(struct inode *inode, int data_blocks, int meta_blocks), - TP_ARGS(inode), + TP_ARGS(inode, data_blocks, meta_blocks), TP_STRUCT__entry( __field( dev_t, dev ) __field( ino_t, ino ) __field( __u64, i_blocks ) + __field( int, data_blocks ) + __field( int, meta_blocks ) __field( int, reserved_data_blocks ) __field( int, reserved_ext_blocks ) __field( __u16, mode ) @@ -1268,16 +1270,20 @@ TRACE_EVENT(ext4_da_reserve_space, __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->i_blocks = inode->i_blocks; + __entry->data_blocks = 
data_blocks; + __entry->meta_blocks = meta_blocks; __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks; __entry->reserved_ext_blocks = EXT4_I(inode)->i_reserved_ext_blocks; __entry->mode = inode->i_mode; ), TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu " + "data_blocks %d meta_blocks %d " "reserved_data_blocks %d reserved_ext_blocks %d", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, __entry->mode, __entry->i_blocks, + __entry->data_blocks, __entry->meta_blocks, __entry->reserved_data_blocks, __entry->reserved_ext_blocks) ); -- 2.39.2