From: Zhang Yi <yi.zhang@xxxxxxxxxx>

Implement the mmap iomap path: add ext4_iomap_page_mkwrite() to map
blocks and dirty the folio. Almost everything has already been done in
iomap_page_mkwrite(), so invoke it directly.

Signed-off-by: Zhang Yi <yi.zhang@xxxxxxxxxx>
---
 fs/ext4/inode.c | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index c22b49521df2..ec390bb59b6b 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -6401,6 +6401,26 @@ static int ext4_bh_unmapped(handle_t *handle, struct inode *inode,
 	return !buffer_mapped(bh);
 }
 
+static vm_fault_t ext4_iomap_page_mkwrite(struct vm_fault *vmf)
+{
+	struct inode *inode = file_inode(vmf->vma->vm_file);
+	const struct iomap_ops *iomap_ops;
+
+	/*
+	 * ext4_nonda_switch() could write back this folio, so it has to
+	 * be called before locking the folio.
+	 *
+	 * TODO: drop ext4_nonda_switch() after reserving enough space
+	 * for metadata and merging delalloc and nodelalloc operations.
+	 */
+	if (test_opt(inode->i_sb, DELALLOC) && !ext4_nonda_switch(inode->i_sb))
+		iomap_ops = &ext4_iomap_buffered_da_write_ops;
+	else
+		iomap_ops = &ext4_iomap_buffered_write_ops;
+
+	return iomap_page_mkwrite(vmf, iomap_ops);
+}
+
 vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
@@ -6424,6 +6444,11 @@ vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
 
 	filemap_invalidate_lock_shared(mapping);
 
+	if (ext4_test_inode_state(inode, EXT4_STATE_BUFFERED_IOMAP)) {
+		ret = ext4_iomap_page_mkwrite(vmf);
+		goto out;
+	}
+
 	err = ext4_convert_inline_data(inode);
 	if (err)
 		goto out_ret;
-- 
2.39.2
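
For readers outside the series, here is a minimal sketch of how a write
fault is expected to reach the new helper. It assumes the ext4_file_vm_ops
wiring in fs/ext4/file.c keeps its current handlers and is not touched by
this series; it is illustrative only, not part of the patch.

/*
 * Illustrative sketch (not part of this patch): a write fault on a
 * shared-writable mapping calls ->page_mkwrite(), i.e.
 * ext4_page_mkwrite(). Inodes with EXT4_STATE_BUFFERED_IOMAP set then
 * take the new ext4_iomap_page_mkwrite() branch, which picks the
 * delalloc or nodelalloc iomap ops and lets iomap_page_mkwrite() lock
 * the folio, map the blocks and dirty it.
 */
static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};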