Extend the passthrough feature by handling asynchronous IO for both read
and write operations. When an AIO request is received, if the request
targets a FUSE file with the passthrough functionality enabled, a new
identical AIO request is created. The new request targets the backing
file and gets assigned a special FUSE passthrough AIO completion
callback. When the backing file AIO request is completed, the FUSE
passthrough AIO completion callback is executed and propagates the
completion signal to the FUSE AIO request by triggering its completion
callback as well.

Signed-off-by: Alessio Balsini <balsini@xxxxxxxxxxx>
Signed-off-by: Amir Goldstein <amir73il@xxxxxxxxx>
---
 fs/fuse/passthrough.c | 82 +++++++++++++++++++++++++++++++++++++++----
 1 file changed, 75 insertions(+), 7 deletions(-)

diff --git a/fs/fuse/passthrough.c b/fs/fuse/passthrough.c
index 9d81f3982c96..2ccd2d6de736 100644
--- a/fs/fuse/passthrough.c
+++ b/fs/fuse/passthrough.c
@@ -9,6 +9,36 @@
 #define FUSE_IOCB_MASK \
 	(IOCB_APPEND | IOCB_DSYNC | IOCB_HIPRI | IOCB_NOWAIT | IOCB_SYNC)
 
+struct fuse_aio_req {
+	struct kiocb iocb;
+	struct kiocb *iocb_fuse;
+};
+
+static void fuse_aio_cleanup_handler(struct fuse_aio_req *aio_req)
+{
+	struct kiocb *iocb = &aio_req->iocb;
+	struct kiocb *iocb_fuse = aio_req->iocb_fuse;
+
+	if (iocb->ki_flags & IOCB_WRITE) {
+		__sb_writers_acquired(file_inode(iocb->ki_filp)->i_sb,
+				      SB_FREEZE_WRITE);
+		file_end_write(iocb->ki_filp);
+	}
+
+	iocb_fuse->ki_pos = iocb->ki_pos;
+	kfree(aio_req);
+}
+
+static void fuse_aio_rw_complete(struct kiocb *iocb, long res)
+{
+	struct fuse_aio_req *aio_req =
+		container_of(iocb, struct fuse_aio_req, iocb);
+	struct kiocb *iocb_fuse = aio_req->iocb_fuse;
+
+	fuse_aio_cleanup_handler(aio_req);
+	iocb_fuse->ki_complete(iocb_fuse, res);
+}
+
 ssize_t fuse_passthrough_read_iter(struct kiocb *iocb_fuse,
 				   struct iov_iter *iter)
 {
@@ -21,8 +51,24 @@ ssize_t fuse_passthrough_read_iter(struct kiocb *iocb_fuse,
 	if (!iov_iter_count(iter))
 		return 0;
 
-	rwf = iocb_to_rw_flags(iocb_fuse->ki_flags, FUSE_IOCB_MASK);
-	ret = vfs_iter_read(passthrough_filp, iter, &iocb_fuse->ki_pos, rwf);
+	if (is_sync_kiocb(iocb_fuse)) {
+		rwf = iocb_to_rw_flags(iocb_fuse->ki_flags, FUSE_IOCB_MASK);
+		ret = vfs_iter_read(passthrough_filp, iter, &iocb_fuse->ki_pos,
+				    rwf);
+	} else {
+		struct fuse_aio_req *aio_req;
+
+		aio_req = kmalloc(sizeof(struct fuse_aio_req), GFP_KERNEL);
+		if (!aio_req)
+			return -ENOMEM;
+
+		aio_req->iocb_fuse = iocb_fuse;
+		kiocb_clone(&aio_req->iocb, iocb_fuse, passthrough_filp);
+		aio_req->iocb.ki_complete = fuse_aio_rw_complete;
+		ret = call_read_iter(passthrough_filp, &aio_req->iocb, iter);
+		if (ret != -EIOCBQUEUED)
+			fuse_aio_cleanup_handler(aio_req);
+	}
 
 	return ret;
 }
@@ -34,6 +80,7 @@ ssize_t fuse_passthrough_write_iter(struct kiocb *iocb_fuse,
 	struct fuse_file *ff = fuse_filp->private_data;
 	struct inode *fuse_inode = file_inode(fuse_filp);
 	struct file *passthrough_filp = ff->passthrough->filp;
+	struct inode *passthrough_inode = file_inode(passthrough_filp);
 	ssize_t ret;
 	rwf_t rwf;
 
@@ -42,11 +89,32 @@ ssize_t fuse_passthrough_write_iter(struct kiocb *iocb_fuse,
 
 	inode_lock(fuse_inode);
 
-	file_start_write(passthrough_filp);
-	rwf = iocb_to_rw_flags(iocb_fuse->ki_flags, FUSE_IOCB_MASK);
-	ret = vfs_iter_write(passthrough_filp, iter, &iocb_fuse->ki_pos, rwf);
-	file_end_write(passthrough_filp);
-
+	if (is_sync_kiocb(iocb_fuse)) {
+		file_start_write(passthrough_filp);
+		rwf = iocb_to_rw_flags(iocb_fuse->ki_flags, FUSE_IOCB_MASK);
+		ret = vfs_iter_write(passthrough_filp, iter, &iocb_fuse->ki_pos,
+				     rwf);
+		file_end_write(passthrough_filp);
+	} else {
+		struct fuse_aio_req *aio_req;
+
+		aio_req = kmalloc(sizeof(struct fuse_aio_req), GFP_KERNEL);
+		if (!aio_req) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		file_start_write(passthrough_filp);
+		__sb_writers_release(passthrough_inode->i_sb, SB_FREEZE_WRITE);
+
+		aio_req->iocb_fuse = iocb_fuse;
+		kiocb_clone(&aio_req->iocb, iocb_fuse, passthrough_filp);
+		aio_req->iocb.ki_complete = fuse_aio_rw_complete;
+		ret = call_write_iter(passthrough_filp, &aio_req->iocb, iter);
+		if (ret != -EIOCBQUEUED)
+			fuse_aio_cleanup_handler(aio_req);
+	}
+out:
 	inode_unlock(fuse_inode);
 
 	return ret;
-- 
2.34.1
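
For reference, a minimal userspace sketch (untested, not part of the patch)
that exercises the new code path with libaio; the /mnt/fuse mount point and
file name below are made up for illustration, build with -laio. fs/aio.c
submits the kiocb with ki_complete set, so is_sync_kiocb() is false in
fuse_passthrough_read_iter(), the request is cloned onto the backing file
with kiocb_clone(), and completion arrives through fuse_aio_rw_complete():

	#include <fcntl.h>
	#include <libaio.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		io_context_t ctx = 0;
		struct iocb cb, *cbs[1] = { &cb };
		struct io_event ev;
		char buf[4096];
		int fd, ret;

		/* Create an AIO context able to hold one in-flight request. */
		ret = io_setup(1, &ctx);
		if (ret < 0)
			return 1;

		/* Any file on a passthrough-enabled FUSE mount. */
		fd = open("/mnt/fuse/testfile", O_RDONLY);
		if (fd < 0)
			return 1;

		/* Queue one async read; reaches the !is_sync_kiocb() branch. */
		io_prep_pread(&cb, fd, buf, sizeof(buf), 0);
		ret = io_submit(ctx, 1, cbs);
		if (ret != 1)
			return 1;

		/* Reap the completion propagated by fuse_aio_rw_complete(). */
		ret = io_getevents(ctx, 1, 1, &ev, NULL);
		if (ret == 1)
			printf("read %ld bytes\n", (long)ev.res);

		close(fd);
		io_destroy(ctx);
		return 0;
	}

An io_prep_pwrite() submission takes the corresponding write branch,
including the file_start_write()/__sb_writers_release() handoff that
fuse_aio_cleanup_handler() undoes at completion time.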