On Thu, 26 Sep 2024, Al Viro wrote:
> On Mon, Sep 16, 2024 at 02:34:59PM +1000, NeilBrown wrote:
>
> > @@ -5011,7 +5012,8 @@ int do_renameat2(int olddfd, struct filename *from, int newdfd,
> >  	struct path old_path, new_path;
> >  	struct qstr old_last, new_last;
> >  	int old_type, new_type;
> > -	struct inode *delegated_inode = NULL;
> > +	struct inode *delegated_inode_old = NULL;
> > +	struct inode *delegated_inode_new = NULL;
> >  	unsigned int lookup_flags = 0, target_flags = LOOKUP_RENAME_TARGET;
> >  	bool should_retry = false;
> >  	int error = -EINVAL;
> > @@ -5118,7 +5120,8 @@ int do_renameat2(int olddfd, struct filename *from, int newdfd,
> >  	rd.new_dir = new_path.dentry->d_inode;
> >  	rd.new_dentry = new_dentry;
> >  	rd.new_mnt_idmap = mnt_idmap(new_path.mnt);
> > -	rd.delegated_inode = &delegated_inode;
> > +	rd.delegated_inode_old = &delegated_inode_old;
> > +	rd.delegated_inode_new = &delegated_inode_new;
> >  	rd.flags = flags;
> >  	error = vfs_rename(&rd);
> > exit5:
> > @@ -5128,9 +5131,14 @@ int do_renameat2(int olddfd, struct filename *from, int newdfd,
> > exit3:
> >  	unlock_rename(new_path.dentry, old_path.dentry);
> > exit_lock_rename:
> > -	if (delegated_inode) {
> > -		error = break_deleg_wait(&delegated_inode);
> > -		if (!error)
> > +	if (delegated_inode_old) {
> > +		error = break_deleg_wait(&delegated_inode_old, error);
> > +		if (error == -EWOULDBLOCK)
> > +			goto retry_deleg;
>
> Won't that goto leak a reference to delegated_inode_new?

I don't think so.  The old delegated_inode_new will be carried into
vfs_rename() and passed to try_break_deleg(), which will notice that it
is non-NULL and will "do the right thing".

Both _old and _new are initialised to NULL at the start of
do_renameat2().  Both are passed to break_deleg_wait() on the last time
through the retry_deleg loop, which drops the references (or preserves
the reference if it isn't the last time), and both are only set by
try_break_deleg(), which is careful to check whether a prior value
exists.

So I think there are no leaks.

Thanks,
NeilBrown

> > +	}
> > +	if (delegated_inode_new) {
> > +		error = break_deleg_wait(&delegated_inode_new, error);
> > +		if (error == -EWOULDBLOCK)
> >  			goto retry_deleg;
> >  	}
> >  	mnt_drop_write(old_path.mnt);
>
> > -static inline int break_deleg_wait(struct inode **delegated_inode)
> > +static inline int break_deleg_wait(struct inode **delegated_inode, int ret)
> >  {
> > -	int ret;
> > -
> > -	ret = break_deleg(*delegated_inode, O_WRONLY);
> > -	iput(*delegated_inode);
> > -	*delegated_inode = NULL;
> > +	if (ret == -EWOULDBLOCK) {
> > +		ret = break_deleg(*delegated_inode, O_WRONLY);
> > +		if (ret == 0)
> > +			ret = -EWOULDBLOCK;
> > +	}
> > +	if (ret != -EWOULDBLOCK) {
> > +		atomic_dec(&(*delegated_inode)->i_flctx->flc_deleg_blockers);
> > +		iput(*delegated_inode);
> > +		*delegated_inode = NULL;
> > +	}
> >  	return ret;
> >  }
>
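
For readers following along, here is a minimal sketch (not the actual
patch from this series) of the check Neil describes: try_break_deleg()
only takes a new reference when *delegated_inode is still NULL, so
re-entering vfs_rename() with a value left over from a previous pass
cannot take a second reference and leak it.  The flc_deleg_blockers
accounting visible in the quoted break_deleg_wait() hunk is omitted,
and the exact signature is an assumption:

	/*
	 * Sketch only -- illustrates the "check if a prior value exists"
	 * behaviour described above; not the code from the posted series.
	 */
	static inline int try_break_deleg(struct inode *inode,
					  struct inode **delegated_inode)
	{
		int ret;

		/* Ask for any delegation to be broken without blocking. */
		ret = break_deleg(inode, O_WRONLY | O_NONBLOCK);
		if (ret == -EWOULDBLOCK && delegated_inode && !*delegated_inode) {
			/*
			 * First time we hit this inode: stash it with a
			 * reference so the caller can wait for the break in
			 * break_deleg_wait().  A later pass that finds
			 * *delegated_inode already set takes no extra
			 * reference, so the retry_deleg loop cannot leak.
			 */
			*delegated_inode = inode;
			ihold(inode);
		}
		return ret;
	}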