On Mon 01-08-22 08:50:34, Khazhismel Kumykov wrote:
> When a disk is removed, bdi_unregister gets called to stop further
> writeback and wait for associated delayed work to complete. However,
> wb_inode_writeback_end() may schedule bandwidth estimation dwork after
> this has completed, which can result in the timer attempting to access
> the just freed bdi_writeback.
> 
> Fix this by checking if the bdi_writeback is alive, similar to when
> scheduling writeback work.
> 
> Since this requires wb->work_lock, and wb_inode_writeback_end() may get
> called from interrupt, switch wb->work_lock to an irqsafe lock.
> 
> Fixes: 45a2966fd641 ("writeback: fix bandwidth estimate for spiky workload")
> Signed-off-by: Khazhismel Kumykov <khazhy@xxxxxxxxxx>

Looks good to me. Feel free to add:

Reviewed-by: Jan Kara <jack@xxxxxxx>

								Honza

> ---
>  fs/fs-writeback.c   | 12 ++++++------
>  mm/backing-dev.c    | 10 +++++-----
>  mm/page-writeback.c |  6 +++++-
>  3 files changed, 16 insertions(+), 12 deletions(-)
> 
> v2: made changelog a bit more verbose
> 
> diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
> index 05221366a16d..08a1993ab7fd 100644
> --- a/fs/fs-writeback.c
> +++ b/fs/fs-writeback.c
> @@ -134,10 +134,10 @@ static bool inode_io_list_move_locked(struct inode *inode,
>  
>  static void wb_wakeup(struct bdi_writeback *wb)
>  {
> -	spin_lock_bh(&wb->work_lock);
> +	spin_lock_irq(&wb->work_lock);
>  	if (test_bit(WB_registered, &wb->state))
>  		mod_delayed_work(bdi_wq, &wb->dwork, 0);
> -	spin_unlock_bh(&wb->work_lock);
> +	spin_unlock_irq(&wb->work_lock);
>  }
>  
>  static void finish_writeback_work(struct bdi_writeback *wb,
> @@ -164,7 +164,7 @@ static void wb_queue_work(struct bdi_writeback *wb,
>  	if (work->done)
>  		atomic_inc(&work->done->cnt);
>  
> -	spin_lock_bh(&wb->work_lock);
> +	spin_lock_irq(&wb->work_lock);
>  
>  	if (test_bit(WB_registered, &wb->state)) {
>  		list_add_tail(&work->list, &wb->work_list);
> @@ -172,7 +172,7 @@ static void wb_queue_work(struct bdi_writeback *wb,
>  	} else
>  		finish_writeback_work(wb, work);
>  
> -	spin_unlock_bh(&wb->work_lock);
> +	spin_unlock_irq(&wb->work_lock);
>  }
>  
>  /**
> @@ -2082,13 +2082,13 @@ static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
>  {
>  	struct wb_writeback_work *work = NULL;
>  
> -	spin_lock_bh(&wb->work_lock);
> +	spin_lock_irq(&wb->work_lock);
>  	if (!list_empty(&wb->work_list)) {
>  		work = list_entry(wb->work_list.next,
>  				  struct wb_writeback_work, list);
>  		list_del_init(&work->list);
>  	}
> -	spin_unlock_bh(&wb->work_lock);
> +	spin_unlock_irq(&wb->work_lock);
>  	return work;
>  }
>  
> diff --git a/mm/backing-dev.c b/mm/backing-dev.c
> index 95550b8fa7fe..de65cb1e5f76 100644
> --- a/mm/backing-dev.c
> +++ b/mm/backing-dev.c
> @@ -260,10 +260,10 @@ void wb_wakeup_delayed(struct bdi_writeback *wb)
>  	unsigned long timeout;
>  
>  	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
> -	spin_lock_bh(&wb->work_lock);
> +	spin_lock_irq(&wb->work_lock);
>  	if (test_bit(WB_registered, &wb->state))
>  		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
> -	spin_unlock_bh(&wb->work_lock);
> +	spin_unlock_irq(&wb->work_lock);
>  }
>  
>  static void wb_update_bandwidth_workfn(struct work_struct *work)
> @@ -334,12 +334,12 @@ static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);
>  static void wb_shutdown(struct bdi_writeback *wb)
>  {
>  	/* Make sure nobody queues further work */
> -	spin_lock_bh(&wb->work_lock);
> +	spin_lock_irq(&wb->work_lock);
>  	if (!test_and_clear_bit(WB_registered, &wb->state)) {
> -		spin_unlock_bh(&wb->work_lock);
> +		spin_unlock_irq(&wb->work_lock);
>  		return;
>  	}
> -	spin_unlock_bh(&wb->work_lock);
> +	spin_unlock_irq(&wb->work_lock);
>  
>  	cgwb_remove_from_bdi_list(wb);
>  	/*
> diff --git a/mm/page-writeback.c b/mm/page-writeback.c
> index 55c2776ae699..3c34db15cf70 100644
> --- a/mm/page-writeback.c
> +++ b/mm/page-writeback.c
> @@ -2867,6 +2867,7 @@ static void wb_inode_writeback_start(struct bdi_writeback *wb)
>  
>  static void wb_inode_writeback_end(struct bdi_writeback *wb)
>  {
> +	unsigned long flags;
>  	atomic_dec(&wb->writeback_inodes);
>  	/*
>  	 * Make sure estimate of writeback throughput gets updated after
> @@ -2875,7 +2876,10 @@ static void wb_inode_writeback_end(struct bdi_writeback *wb)
>  	 * that if multiple inodes end writeback at a similar time, they get
>  	 * batched into one bandwidth update.
>  	 */
> -	queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
> +	spin_lock_irqsave(&wb->work_lock, flags);
> +	if (test_bit(WB_registered, &wb->state))
> +		queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
> +	spin_unlock_irqrestore(&wb->work_lock, flags);
>  }
>  
>  bool __folio_end_writeback(struct folio *folio)
> -- 
> 2.37.1.455.g008518b4e5-goog
> 

-- 
Jan Kara <jack@xxxxxxxx>
SUSE Labs, CR