Extend the passthrough feature by handling asynchronous IO, both for read
and write operations.

When an AIO request targeting a FUSE file with passthrough enabled is
received, a new identical AIO request is created; its file pointer is
replaced with that of the lower file system file and its completion
handler is set to a dedicated AIO passthrough handler. The lower file
system AIO request is allocated in dynamic kernel memory and, when it
completes, the allocation is freed and the completion is propagated to
the FUSE AIO request by triggering its completion callback in turn.

Signed-off-by: Alessio Balsini <balsini@xxxxxxxxxxx>
---
 fs/fuse/passthrough.c | 66 +++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 63 insertions(+), 3 deletions(-)

diff --git a/fs/fuse/passthrough.c b/fs/fuse/passthrough.c
index 44a78e02f45d..87b57b26fd8a 100644
--- a/fs/fuse/passthrough.c
+++ b/fs/fuse/passthrough.c
@@ -2,10 +2,16 @@
 
 #include "fuse_i.h"
 
+#include <linux/aio.h>
 #include <linux/fs_stack.h>
 #include <linux/fsnotify.h>
 #include <linux/uio.h>
 
+struct fuse_aio_req {
+	struct kiocb iocb;
+	struct kiocb *iocb_fuse;
+};
+
 static void fuse_copyattr(struct file *dst_file, struct file *src_file,
 			  bool write)
 {
@@ -20,6 +26,32 @@ static void fuse_copyattr(struct file *dst_file, struct file *src_file,
 	}
 }
 
+static void fuse_aio_cleanup_handler(struct fuse_aio_req *aio_req)
+{
+	struct kiocb *iocb = &aio_req->iocb;
+	struct kiocb *iocb_fuse = aio_req->iocb_fuse;
+	bool write = !!(iocb->ki_flags & IOCB_WRITE);
+
+	if (write) {
+		__sb_writers_acquired(file_inode(iocb->ki_filp)->i_sb,
+				      SB_FREEZE_WRITE);
+		file_end_write(iocb->ki_filp);
+	}
+
+	fuse_copyattr(iocb_fuse->ki_filp, iocb->ki_filp, write);
+	iocb_fuse->ki_pos = iocb->ki_pos;
+	kfree(aio_req);
+}
+
+static void fuse_aio_rw_complete(struct kiocb *iocb, long res, long res2)
+{
+	struct fuse_aio_req *aio_req =
+		container_of(iocb, struct fuse_aio_req, iocb);
+	struct kiocb *iocb_fuse = aio_req->iocb_fuse;
+
+	fuse_aio_cleanup_handler(aio_req);
+	iocb_fuse->ki_complete(iocb_fuse, res, res2);
+}
 ssize_t fuse_passthrough_read_iter(struct kiocb *iocb_fuse,
 				   struct iov_iter *iter)
 {
@@ -42,7 +74,18 @@ ssize_t fuse_passthrough_read_iter(struct kiocb *iocb_fuse,
 
 		fuse_copyattr(fuse_filp, passthrough_filp, false);
 	} else {
-		ret = -EIO;
+		struct fuse_aio_req *aio_req;
+
+		aio_req = kmalloc(sizeof(struct fuse_aio_req), GFP_KERNEL);
+		if (!aio_req)
+			return -ENOMEM;
+
+		aio_req->iocb_fuse = iocb_fuse;
+		kiocb_clone(&aio_req->iocb, iocb_fuse, passthrough_filp);
+		aio_req->iocb.ki_complete = fuse_aio_rw_complete;
+		ret = call_read_iter(passthrough_filp, &aio_req->iocb, iter);
+		if (ret != -EIOCBQUEUED)
+			fuse_aio_cleanup_handler(aio_req);
 	}
 
 	return ret;
@@ -56,6 +99,7 @@ ssize_t fuse_passthrough_write_iter(struct kiocb *iocb_fuse,
 	struct fuse_file *ff = fuse_filp->private_data;
 	struct inode *fuse_inode = file_inode(fuse_filp);
 	struct file *passthrough_filp = ff->passthrough_filp;
+	struct inode *passthrough_inode = file_inode(passthrough_filp);
 
 	if (!iov_iter_count(iter))
 		return 0;
@@ -75,9 +119,25 @@ ssize_t fuse_passthrough_write_iter(struct kiocb *iocb_fuse,
 		if (ret > 0)
 			fuse_copyattr(fuse_filp, passthrough_filp, true);
 	} else {
-		ret = -EIO;
-	}
+		struct fuse_aio_req *aio_req;
 
+		aio_req = kmalloc(sizeof(struct fuse_aio_req), GFP_KERNEL);
+		if (!aio_req) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		file_start_write(passthrough_filp);
+		__sb_writers_release(passthrough_inode->i_sb, SB_FREEZE_WRITE);
+
+		aio_req->iocb_fuse = iocb_fuse;
+		kiocb_clone(&aio_req->iocb, iocb_fuse, passthrough_filp);
+		aio_req->iocb.ki_complete = fuse_aio_rw_complete;
+		ret = call_write_iter(passthrough_filp, &aio_req->iocb, iter);
+		if (ret != -EIOCBQUEUED)
+			fuse_aio_cleanup_handler(aio_req);
+	}
+out:
 	inode_unlock(fuse_inode);
 
 	return ret;
-- 
2.28.0.618.gf4bc123cb7-goog
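
Not part of the patch, only an illustrative sketch of how the new
asynchronous branch (the else arm above, taken for non-synchronous
kiocbs such as those created by io_submit(2)) can be exercised from
userspace with libaio. The mount point /mnt/fuse and the file name
testfile are assumptions standing in for any passthrough-enabled FUSE
mount; build with: gcc aio_passthrough_test.c -laio

/* aio_passthrough_test.c: submit one AIO write through a FUSE mount. */
#include <fcntl.h>
#include <libaio.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	io_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	static char buf[4096];
	int fd;

	/* Hypothetical path on a passthrough-enabled FUSE mount. */
	fd = open("/mnt/fuse/testfile", O_RDWR | O_CREAT, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (io_setup(1, &ctx) < 0) {
		fprintf(stderr, "io_setup failed\n");
		return 1;
	}

	/* One asynchronous write; FUSE clones the kiocb onto the lower file. */
	memset(buf, 'x', sizeof(buf));
	io_prep_pwrite(&cb, fd, buf, sizeof(buf), 0);
	if (io_submit(ctx, 1, cbs) != 1) {
		fprintf(stderr, "io_submit failed\n");
		return 1;
	}

	/* Wait for the completion to be reported back to the FUSE request. */
	if (io_getevents(ctx, 1, 1, &ev, NULL) == 1)
		printf("write completed, res=%ld\n", (long)ev.res);

	io_destroy(ctx);
	close(fd);
	return 0;
}

If the lower file system completes the cloned request synchronously
(common for buffered writes), call_write_iter() returns the byte count
rather than -EIOCBQUEUED and fuse_aio_cleanup_handler() runs inline, so
the program above reaps a completion event either way; O_DIRECT on a
supporting lower file system is typically needed to see the request
complete through fuse_aio_rw_complete().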