Hi Jan, On Thu, Jun 11, 2020 at 10:12 AM Jan Kara <jack@xxxxxxx> wrote: > > Currently, operations on inode->i_io_list are protected by > wb->list_lock. In the following patches we'll need to maintain > consistency between inode->i_state and inode->i_io_list so change the > code so that inode->i_lock also protects all of the inode's i_io_list handling. > > Signed-off-by: Jan Kara <jack@xxxxxxx> LGTM. Reviewed-by: Martijn Coenen <maco@xxxxxxxxxxx> > --- > fs/fs-writeback.c | 22 +++++++++++++++++----- > 1 file changed, 17 insertions(+), 5 deletions(-) > > diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c > index a605c3dddabc..ff0b18331590 100644 > --- a/fs/fs-writeback.c > +++ b/fs/fs-writeback.c > @@ -144,6 +144,7 @@ static void inode_io_list_del_locked(struct inode *inode, > struct bdi_writeback *wb) > { > assert_spin_locked(&wb->list_lock); > + assert_spin_locked(&inode->i_lock); > > list_del_init(&inode->i_io_list); > wb_io_lists_depopulated(wb); > @@ -1122,7 +1123,9 @@ void inode_io_list_del(struct inode *inode) > struct bdi_writeback *wb; > > wb = inode_to_wb_and_lock_list(inode); > + spin_lock(&inode->i_lock); > inode_io_list_del_locked(inode, wb); > + spin_unlock(&inode->i_lock); > spin_unlock(&wb->list_lock); > } > EXPORT_SYMBOL(inode_io_list_del); > @@ -1172,8 +1175,10 @@ void sb_clear_inode_writeback(struct inode *inode) > * the case then the inode must have been redirtied while it was being written > * out and we don't reset its dirtied_when. 
> */ > -static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) > +static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb) > { > + assert_spin_locked(&inode->i_lock); > + > if (!list_empty(&wb->b_dirty)) { > struct inode *tail; > > @@ -1184,6 +1189,13 @@ static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) > inode_io_list_move_locked(inode, wb, &wb->b_dirty); > } > > +static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) > +{ > + spin_lock(&inode->i_lock); > + redirty_tail_locked(inode, wb); > + spin_unlock(&inode->i_lock); > +} > + > /* > * requeue inode for re-scanning after bdi->b_io list is exhausted. > */ > @@ -1394,7 +1406,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, > * writeback is not making progress due to locked > * buffers. Skip this inode for now. > */ > - redirty_tail(inode, wb); > + redirty_tail_locked(inode, wb); > return; > } > > @@ -1414,7 +1426,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, > * retrying writeback of the dirty page/inode > * that cannot be performed immediately. > */ > - redirty_tail(inode, wb); > + redirty_tail_locked(inode, wb); > } > } else if (inode->i_state & I_DIRTY) { > /* > @@ -1422,7 +1434,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, > * such as delayed allocation during submission or metadata > * updates after data IO completion. 
> */ > - redirty_tail(inode, wb); > + redirty_tail_locked(inode, wb); > } else if (inode->i_state & I_DIRTY_TIME) { > inode->dirtied_when = jiffies; > inode_io_list_move_locked(inode, wb, &wb->b_dirty_time); > @@ -1669,8 +1681,8 @@ static long writeback_sb_inodes(struct super_block *sb, > */ > spin_lock(&inode->i_lock); > if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { > + redirty_tail_locked(inode, wb); > spin_unlock(&inode->i_lock); > - redirty_tail(inode, wb); > continue; > } > if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) { > -- > 2.16.4 >