On 26.11.19 at 5:14, Goldwyn Rodrigues wrote:
> From: Goldwyn Rodrigues <rgoldwyn@xxxxxxxx>
>
> Switch from __blockdev_direct_IO() to iomap_dio_rw().
> Rename btrfs_get_blocks_direct() to btrfs_dio_iomap_begin() and use it
> as iomap_begin() for iomap direct I/O functions. This function
> allocates and locks all the blocks required for the I/O.
> btrfs_submit_direct() is used as the submit_io() hook for direct I/O
> ops.
>
> Since we need direct I/O reads to go through iomap_dio_rw(), we change
> file_operations.read_iter() to a btrfs_file_read_iter() which calls
> btrfs_direct_IO() for direct reads and falls back to
> generic_file_buffered_read() for incomplete reads and buffered reads.
>
> We don't need address_space.direct_IO() anymore so set it to noop.
> Similarly, we don't need flags used in __blockdev_direct_IO(). iomap is
> capable of direct I/O reads from a hole, so we don't need to return
> -ENOENT.
>
> Signed-off-by: Goldwyn Rodrigues <rgoldwyn@xxxxxxxx>
> ---
>  fs/btrfs/ctree.h |   2 +
>  fs/btrfs/file.c  |  15 ++++-
>  fs/btrfs/inode.c | 171 ++++++++++++++++++++++++++-----------------------------
>  3 files changed, 97 insertions(+), 91 deletions(-)
>
> diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
> index fe2b8765d9e6..6f038ba34512 100644
> --- a/fs/btrfs/ctree.h
> +++ b/fs/btrfs/ctree.h
> @@ -2903,6 +2903,8 @@ int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end);
>  void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
>  					  u64 end, int uptodate);
>  extern const struct dentry_operations btrfs_dentry_operations;
> +ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter);
> +ssize_t btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter);
>
>  /* ioctl.c */
>  long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
> diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
> index 435a502a3226..960a0767f532 100644
> --- a/fs/btrfs/file.c
> +++ b/fs/btrfs/file.c
> @@ -1834,7 +1834,7 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
>  	loff_t endbyte;
>  	int err;
>
> -	written = generic_file_direct_write(iocb, from);
> +	written = btrfs_dio_write(iocb, from);
>
>  	if (written < 0 || !iov_iter_count(from))
>  		return written;
> @@ -3452,9 +3452,20 @@ static int btrfs_file_open(struct inode *inode, struct file *filp)
>  	return generic_file_open(inode, filp);
>  }
>
> +static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
> +{
> +	ssize_t ret = 0;
> +	if (iocb->ki_flags & IOCB_DIRECT)
> +		ret = btrfs_dio_read(iocb, to);
> +	if (ret < 0)
> +		return ret;
> +
> +	return generic_file_buffered_read(iocb, to, ret);
> +}
> +
>  const struct file_operations btrfs_file_operations = {
>  	.llseek		= btrfs_file_llseek,
> -	.read_iter	= generic_file_read_iter,
> +	.read_iter	= btrfs_file_read_iter,
>  	.splice_read	= generic_file_splice_read,
>  	.write_iter	= btrfs_file_write_iter,
>  	.mmap		= btrfs_file_mmap,
> diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
> index 015910079e73..9a39a53b9068 100644
> --- a/fs/btrfs/inode.c
> +++ b/fs/btrfs/inode.c
> @@ -29,6 +29,7 @@
>  #include <linux/iversion.h>
>  #include <linux/swap.h>
>  #include <linux/sched/mm.h>
> +#include <linux/iomap.h>
>  #include <asm/unaligned.h>
>  #include "misc.h"
>  #include "ctree.h"
> @@ -7619,28 +7620,7 @@ static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
>  }
>
>
> -static int btrfs_get_blocks_direct_read(struct extent_map *em,
> -					struct buffer_head *bh_result,
> -					struct inode *inode,
> -					u64 start, u64 len)
> -{
> -	if (em->block_start == EXTENT_MAP_HOLE ||
> -	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
> -		return -ENOENT;
> -
> -	len = min(len, em->len - (start - em->start));
> -
> -	bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
> -			       inode->i_blkbits;
> -	bh_result->b_size = len;
> -	bh_result->b_bdev = em->bdev;
> -	set_buffer_mapped(bh_result);
> -
> -	return 0;
> -}
> -
>  static int btrfs_get_blocks_direct_write(struct extent_map **map,
> -					 struct buffer_head *bh_result,
>  					 struct inode *inode,
>  					 struct btrfs_dio_data *dio_data,
>  					 u64 start, u64 len)
> @@ -7702,7 +7682,6 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
>  	}
>
>  	/* this will cow the extent */
> -	len = bh_result->b_size;
>  	free_extent_map(em);
>  	*map = em = btrfs_new_extent_direct(inode, start, len);
>  	if (IS_ERR(em)) {
> @@ -7713,15 +7692,6 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
>  	len = min(len, em->len - (start - em->start));
>
>  skip_cow:
> -	bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
> -			       inode->i_blkbits;
> -	bh_result->b_size = len;
> -	bh_result->b_bdev = em->bdev;
> -	set_buffer_mapped(bh_result);
> -
> -	if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
> -		set_buffer_new(bh_result);
> -
>  	/*
>  	 * Need to update the i_size under the extent lock so buffered
>  	 * readers will get the updated i_size when we unlock.
> @@ -7737,17 +7707,19 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
>  	return ret;
>  }
>
> -static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
> -				   struct buffer_head *bh_result, int create)
> +static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
> +		loff_t length, unsigned flags, struct iomap *iomap,
> +		struct iomap *srcmap)
>  {
>  	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
>  	struct extent_map *em;
>  	struct extent_state *cached_state = NULL;
>  	struct btrfs_dio_data *dio_data = NULL;
> -	u64 start = iblock << inode->i_blkbits;
>  	u64 lockstart, lockend;
> -	u64 len = bh_result->b_size;
> +	int create = flags & IOMAP_WRITE;
>  	int ret = 0;
> +	u64 len = length;
> +	bool unlock_extents = false;
>
>  	if (!create)
>  		len = min_t(u64, len, fs_info->sectorsize);
> @@ -7755,6 +7727,17 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
>  	lockstart = start;
>  	lockend = start + len - 1;
>
> +	/*
> +	 * The generic stuff only does filemap_write_and_wait_range, which
> +	 * isn't enough if we've written compressed pages to this area, so
> +	 * we need to flush the dirty pages again to make absolutely sure
> +	 * that any outstanding dirty pages are on disk.
> +	 */
> +	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
> +		     &BTRFS_I(inode)->runtime_flags))
> +		ret = filemap_fdatawrite_range(inode->i_mapping, start,
> +					       start + length - 1);
> +
>  	if (current->journal_info) {
>  		/*
>  		 * Need to pull our outstanding extents and set journal_info to NULL so
> @@ -7803,35 +7786,45 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
>  	}
>
>  	if (create) {
> -		ret = btrfs_get_blocks_direct_write(&em, bh_result, inode,
> +		ret = btrfs_get_blocks_direct_write(&em, inode,
>  						    dio_data, start, len);
>  		if (ret < 0)
>  			goto unlock_err;
> -
> -		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
> -				     lockend, &cached_state);
> +		unlock_extents = true;
> +	} else if (em->block_start == EXTENT_MAP_HOLE ||
> +		   test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
> +		/* Unlock in case of direct reading from a hole */
> +		unlock_extents = true;
>  	} else {
> -		ret = btrfs_get_blocks_direct_read(em, bh_result, inode,
> -						   start, len);
> -		/* Can be negative only if we read from a hole */
> -		if (ret < 0) {
> -			ret = 0;
> -			free_extent_map(em);
> -			goto unlock_err;
> -		}
> +
> +		len = min(len, em->len - (start - em->start));
>  		/*
>  		 * We need to unlock only the end area that we aren't using.
>  		 * The rest is going to be unlocked by the endio routine.
>  		 */
> -		lockstart = start + bh_result->b_size;
> -		if (lockstart < lockend) {
> -			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
> -					     lockstart, lockend, &cached_state);
> -		} else {
> +		lockstart = start + len;
> +		if (lockstart < lockend)
> +			unlock_extents = true;
> +		else
>  			free_extent_state(cached_state);
> -		}
>  	}
>
> +	if (unlock_extents)
> +		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
> +				     lockstart, lockend, &cached_state);
> +
> +	if ((em->block_start == EXTENT_MAP_HOLE) ||
> +	    (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !create)) {
> +		iomap->addr = IOMAP_NULL_ADDR;
> +		iomap->type = IOMAP_HOLE;
> +	} else {
> +		iomap->addr = em->block_start;
> +		iomap->type = IOMAP_MAPPED;
> +	}
> +	iomap->offset = em->start;
> +	iomap->bdev = em->bdev;
> +	iomap->length = em->len;
> +

So here you always return a full extent worth of data; why not trim it to the requested range? I think this is still correct, because the generic iomap infrastructure will only process the requested range, at least as far as iomap->length is concerned. But I'm not so sure about iomap->offset.
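For illustration, trimming could look roughly like the untested sketch below. It only reuses the locals already present in btrfs_dio_iomap_begin() (em, start, len, create, iomap), so treat it as a sketch of the idea rather than a drop-in replacement:

	/*
	 * Sketch only: hand iomap a mapping clamped to the requested range
	 * instead of the whole extent.
	 */
	len = min(len, em->len - (start - em->start));

	if (em->block_start == EXTENT_MAP_HOLE ||
	    (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !create)) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_HOLE;
	} else {
		/* Advance the disk address by our offset into the extent. */
		iomap->addr = em->block_start + (start - em->start);
		iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = start;	/* requested file offset, not em->start */
	iomap->length = len;	/* trimmed length, not em->len */
	iomap->bdev = em->bdev;

That would at least make iomap->offset match the offset iomap_dio_rw() passed in, so the question of how the generic code treats a mapping that starts before the requested range wouldn't arise.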