Make use of the new iomap_iter->private field to avoid a memory
allocation per iomap range.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 fs/btrfs/inode.c | 36 ++++++++++++++----------------------
 1 file changed, 14 insertions(+), 22 deletions(-)

diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index ab3ff4747266a..adcd392caa78e 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7511,10 +7511,11 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
 		loff_t length, unsigned int flags, struct iomap *iomap,
 		struct iomap *srcmap)
 {
+	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct extent_map *em;
 	struct extent_state *cached_state = NULL;
-	struct btrfs_dio_data *dio_data = NULL;
+	struct btrfs_dio_data *dio_data = iter->private;
 	u64 lockstart, lockend;
 	const bool write = !!(flags & IOMAP_WRITE);
 	int ret = 0;
@@ -7541,21 +7542,15 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
 		return ret;
 	}
 
-	dio_data = kzalloc(sizeof(*dio_data), GFP_NOFS);
-	if (!dio_data)
-		return -ENOMEM;
-
-	iomap->private = dio_data;
-
+	dio_data->submitted = 0;
+	dio_data->data_reserved = NULL;
 	/*
 	 * If this errors out it's because we couldn't invalidate pagecache for
 	 * this range and we need to fallback to buffered.
 	 */
-	if (lock_extent_direct(inode, lockstart, lockend, &cached_state, write)) {
-		ret = -ENOTBLK;
-		goto err;
-	}
+	if (lock_extent_direct(inode, lockstart, lockend, &cached_state, write))
+		return -ENOTBLK;
 
 	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len);
 	if (IS_ERR(em)) {
@@ -7664,24 +7659,22 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
 unlock_err:
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
 			     &cached_state);
-err:
-	kfree(dio_data);
-
 	return ret;
 }
 
 static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
 		ssize_t written, unsigned int flags, struct iomap *iomap)
 {
-	int ret = 0;
-	struct btrfs_dio_data *dio_data = iomap->private;
+	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);
+	struct btrfs_dio_data *dio_data = iter->private;
 	size_t submitted = dio_data->submitted;
 	const bool write = !!(flags & IOMAP_WRITE);
+	int ret = 0;
 
 	if (!write && (iomap->type == IOMAP_HOLE)) {
 		/* If reading from a hole, unlock and return */
 		unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1);
-		goto out;
+		return 0;
 	}
 
 	if (submitted < length) {
@@ -7698,10 +7691,6 @@ static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
 
 	if (write)
 		extent_changeset_free(dio_data->data_reserved);
-out:
-	kfree(dio_data);
-	iomap->private = NULL;
-
 	return ret;
 }
 
@@ -7935,7 +7924,7 @@ static void btrfs_submit_direct(const struct iomap_iter *iter,
 	int ret;
 	blk_status_t status;
 	struct btrfs_io_geometry geom;
-	struct btrfs_dio_data *dio_data = iter->iomap.private;
+	struct btrfs_dio_data *dio_data = iter->private;
 	struct extent_map *em = NULL;
 
 	dip = btrfs_create_dio_private(dio_bio, inode, file_offset);
@@ -8062,6 +8051,9 @@ static const struct iomap_dio_ops btrfs_dio_ops = {
 
 ssize_t btrfs_dio_rw(struct kiocb *iocb, struct iov_iter *iter, size_t done_before)
 {
+	struct btrfs_dio_data data;
+
+	iocb->private = &data;
 	return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
 			   IOMAP_DIO_PARTIAL, done_before);
 }
-- 
2.30.2
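
For anyone who has not looked at the iomap_iter plumbing yet, the recovery
pattern used in btrfs_dio_iomap_begin()/_end() above can be shown in
isolation. The following is only a userspace sketch of the container_of()
idiom, written under the assumption that the iomap handed to the callbacks
is embedded in a struct iomap_iter whose ->private pointer was set up once
by the caller; the struct layouts and field values below are simplified
stand-ins, not the real <linux/iomap.h> definitions.

/*
 * Standalone sketch: ->iomap_begin()/->iomap_end() only receive a
 * struct iomap *, but because that iomap is embedded in the caller's
 * struct iomap_iter, the iter (and its ->private pointer) can be
 * recovered without any per-range allocation.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct iomap {
	unsigned long long offset;	/* placeholder fields only */
	unsigned long long length;
};

struct iomap_iter {
	struct iomap iomap;		/* embedded iomap, as in the kernel */
	void *private;			/* per-operation data, set once by the caller */
};

struct btrfs_dio_data {
	size_t submitted;		/* simplified stand-in for the real struct */
};

/* Mimics the callbacks: only the embedded iomap is passed in. */
static struct btrfs_dio_data *get_dio_data(struct iomap *iomap)
{
	struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap);

	return iter->private;
}

int main(void)
{
	struct btrfs_dio_data data = { .submitted = 4096 };
	struct iomap_iter iter = { .private = &data };

	/* No per-range allocation: the data lives on the caller's stack. */
	printf("submitted = %zu\n", get_dio_data(&iter.iomap)->submitted);
	return 0;
}

The point of the change is visible in the sketch: nothing is allocated or
freed per range any more; the per-operation data lives for the whole I/O
(on the caller's stack, as in btrfs_dio_rw() above) and each range merely
recomputes a pointer to it.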