On Sun, Dec 24, 2023 at 12:49 PM Bernd Schubert <bschubert@xxxxxxx> wrote:
>
> So far this is just a helper to move complex locking
> logic out of fuse_direct_write_iter. Especially needed
> by the next patch in the series that adds the fuse inode
> cache IO mode and adds even more locking complexity.
>
> Signed-off-by: Bernd Schubert <bschubert@xxxxxxx>

Reviewed-by: Amir Goldstein <amir73il@xxxxxxxxx>

> ---
>  fs/fuse/file.c | 61 ++++++++++++++++++++++++++++----------------------
>  1 file changed, 34 insertions(+), 27 deletions(-)
>
> diff --git a/fs/fuse/file.c b/fs/fuse/file.c
> index 546254aaab19f..abc93415ec7e3 100644
> --- a/fs/fuse/file.c
> +++ b/fs/fuse/file.c
> @@ -1337,6 +1337,37 @@ static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from
>  	return false;
>  }
>  
> +static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from,
> +			  bool *exclusive)
> +{
> +	struct inode *inode = file_inode(iocb->ki_filp);
> +
> +	*exclusive = fuse_dio_wr_exclusive_lock(iocb, from);
> +	if (*exclusive) {
> +		inode_lock(inode);
> +	} else {
> +		inode_lock_shared(inode);
> +		/*
> +		 * Previous check was without inode lock and might have raced,
> +		 * check again.
> +		 */
> +		if (fuse_io_past_eof(iocb, from)) {
> +			inode_unlock_shared(inode);
> +			inode_lock(inode);
> +			*exclusive = true;
> +		}
> +	}
> +}
> +
> +static void fuse_dio_unlock(struct inode *inode, bool exclusive)
> +{
> +	if (exclusive) {
> +		inode_unlock(inode);
> +	} else {
> +		inode_unlock_shared(inode);
> +	}
> +}
> +
>  static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
>  {
>  	struct file *file = iocb->ki_filp;
> @@ -1601,30 +1632,9 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
>  	struct inode *inode = file_inode(iocb->ki_filp);
>  	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
>  	ssize_t res;
> -	bool exclusive_lock = fuse_dio_wr_exclusive_lock(iocb, from);
> -
> -	/*
> -	 * Take exclusive lock if
> -	 * - Parallel direct writes are disabled - a user space decision
> -	 * - Parallel direct writes are enabled and i_size is being extended.
> -	 * - Shared mmap on direct_io file is supported (FUSE_DIRECT_IO_ALLOW_MMAP).
> -	 *   This might not be needed at all, but needs further investigation.
> -	 */
> -	if (exclusive_lock)
> -		inode_lock(inode);
> -	else {
> -		inode_lock_shared(inode);
> -
> -		/*
> -		 * Previous check was without any lock and might have raced.
> -		 */
> -		if (fuse_dio_wr_exclusive_lock(iocb, from)) {
> -			inode_unlock_shared(inode);
> -			inode_lock(inode);
> -			exclusive_lock = true;
> -		}
> -	}
> +	bool exclusive;
>  
> +	fuse_dio_lock(iocb, from, &exclusive);
>  	res = generic_write_checks(iocb, from);
>  	if (res > 0) {
>  		if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
> @@ -1635,10 +1645,7 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
>  			fuse_write_update_attr(inode, iocb->ki_pos, res);
>  		}
>  	}
> -	if (exclusive_lock)
> -		inode_unlock(inode);
> -	else
> -		inode_unlock_shared(inode);
> +	fuse_dio_unlock(inode, exclusive);
>  
>  	return res;
>  }
> --
> 2.40.1
>
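
Aside, for readers unfamiliar with the pattern: fuse_dio_lock() implements an
optimistic lock upgrade, i.e. decide without the lock whether exclusive access
is needed, take the shared lock, re-check under it, and fall back to the
exclusive lock if the unlocked check raced. Below is a minimal userspace
sketch of that pattern using a pthread rwlock. It is not kernel code; the
names dio_lock, dio_unlock, past_eof and file_size are illustrative stand-ins
for the fuse helpers and i_size, and the other conditions that
fuse_dio_wr_exclusive_lock() checks (parallel direct writes disabled,
FUSE_DIRECT_IO_ALLOW_MMAP) are omitted.

/*
 * Userspace sketch of the lock-upgrade pattern in fuse_dio_lock():
 * check optimistically without the lock, take the shared lock,
 * re-check under it, and upgrade to the exclusive lock on a race.
 * Not kernel code; all names are illustrative.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t file_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Shared state, stand-in for the inode size; the kernel uses i_size_read(). */
static long file_size;

/* Stand-in for fuse_io_past_eof(): does this write extend the file? */
static bool past_eof(long pos, long len)
{
	return pos + len > file_size;
}

static void dio_lock(long pos, long len, bool *exclusive)
{
	/* First check runs without the lock, so it may race with size updates. */
	*exclusive = past_eof(pos, len);
	if (*exclusive) {
		pthread_rwlock_wrlock(&file_lock);
	} else {
		pthread_rwlock_rdlock(&file_lock);
		/* Re-check under the shared lock; upgrade if the first check raced. */
		if (past_eof(pos, len)) {
			pthread_rwlock_unlock(&file_lock);
			pthread_rwlock_wrlock(&file_lock);
			*exclusive = true;
		}
	}
}

static void dio_unlock(bool exclusive)
{
	/* pthread rwlocks use the same unlock call for both modes. */
	(void)exclusive;
	pthread_rwlock_unlock(&file_lock);
}

The point of the pattern is that the common case (a write that does not extend
the file) stays on the shared lock and can run in parallel, while the unlocked
first check is only an optimization whose result is confirmed once the shared
lock is held.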