On Mon 13-09-10 20:31:11, Wu Fengguang wrote: > Check background work whenever the flusher thread wakes up. The page > reclaim code may lower the soft dirty limit immediately before sending > some work to the flusher thread. > > This is also the prerequisite of next patch. I have a patch that does something functionally rather similar, but it also cleans up code that becomes unnecessary after this patch is applied. So could you maybe consider using that one? BTW: What has happened with your patch which started writing back old inodes? Honza > > Signed-off-by: Wu Fengguang <fengguang.wu@xxxxxxxxx> > --- > fs/fs-writeback.c | 18 ++++++++++++++++++ > 1 file changed, 18 insertions(+) > > --- linux-next.orig/fs/fs-writeback.c 2010-09-13 19:41:21.000000000 +0800 > +++ linux-next/fs/fs-writeback.c 2010-09-13 19:49:11.000000000 +0800 > @@ -716,6 +716,23 @@ get_next_work_item(struct backing_dev_in > return work; > } > > +static long wb_check_background_flush(struct bdi_writeback *wb) > +{ > + if (over_bground_thresh()) { > + > + struct wb_writeback_work work = { > + .nr_pages = LONG_MAX, > + .sync_mode = WB_SYNC_NONE, > + .for_background = 1, > + .range_cyclic = 1, > + }; > + > + return wb_writeback(wb, &work); > + } > + > + return 0; > +} > + > static long wb_check_old_data_flush(struct bdi_writeback *wb) > { > unsigned long expired; > @@ -787,6 +804,7 @@ long wb_do_writeback(struct bdi_writebac > * Check for periodic writeback, kupdated() style > */ > wrote += wb_check_old_data_flush(wb); > + wrote += wb_check_background_flush(wb); > clear_bit(BDI_writeback_running, &wb->bdi->state); > > return wrote; > > -- Jan Kara <jack@xxxxxxx> SUSE Labs, CR
>From 548303f16d1880dc7346578d0ced6f24fdeb31fc Mon Sep 17 00:00:00 2001 From: Jan Kara <jack@xxxxxxx> Date: Tue, 17 Aug 2010 13:07:44 +0200 Subject: [PATCH 1/2] mm: Check whether background writeback is needed after finishing each work When bdi flusher thread finishes doing some work check whether any kind of background writeback needs to be done (either because dirty_background_ratio is exceeded or because we need to start flushing old inodes). If so, just do background write back. This way, bdi_start_background_writeback() just needs to wake up the flusher thread. It will do background writeback as soon as there is no other work. This is a preparatory patch for the next patch which stops background writeback as soon as there is other work to do. Signed-off-by: Jan Kara <jack@xxxxxxx> --- fs/fs-writeback.c | 68 +++++++++++++++++++++++++++++++++++----------------- 1 files changed, 46 insertions(+), 22 deletions(-) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 7d9d06b..e2eb42f 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -71,13 +71,9 @@ int writeback_in_progress(struct backing_dev_info *bdi) return test_bit(BDI_writeback_running, &bdi->state); } -static void bdi_queue_work(struct backing_dev_info *bdi, - struct wb_writeback_work *work) +/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */ +static void _bdi_wakeup_flusher(struct backing_dev_info *bdi) { - trace_writeback_queue(bdi, work); - - spin_lock_bh(&bdi->wb_lock); - list_add_tail(&work->list, &bdi->work_list); if (bdi->wb.task) { wake_up_process(bdi->wb.task); } else { @@ -85,15 +81,26 @@ static void bdi_queue_work(struct backing_dev_info *bdi, * The bdi thread isn't there, wake up the forker thread which * will create and run it. 
*/ - trace_writeback_nothread(bdi, work); wake_up_process(default_backing_dev_info.wb.task); } +} + +static void bdi_queue_work(struct backing_dev_info *bdi, + struct wb_writeback_work *work) +{ + trace_writeback_queue(bdi, work); + + spin_lock_bh(&bdi->wb_lock); + list_add_tail(&work->list, &bdi->work_list); + if (!bdi->wb.task) + trace_writeback_nothread(bdi, work); + _bdi_wakeup_flusher(bdi); spin_unlock_bh(&bdi->wb_lock); } static void __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, - bool range_cyclic, bool for_background) + bool range_cyclic) { struct wb_writeback_work *work; @@ -113,7 +120,6 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, work->sync_mode = WB_SYNC_NONE; work->nr_pages = nr_pages; work->range_cyclic = range_cyclic; - work->for_background = for_background; bdi_queue_work(bdi, work); } @@ -131,7 +137,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, */ void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages) { - __bdi_start_writeback(bdi, nr_pages, true, false); + __bdi_start_writeback(bdi, nr_pages, true); } /** @@ -139,13 +145,20 @@ void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages) * @bdi: the backing device to write from * * Description: - * This does WB_SYNC_NONE background writeback. The IO is only - * started when this function returns, we make no guarentees on - * completion. Caller need not hold sb s_umount semaphore. + * This makes sure WB_SYNC_NONE background writeback happens. When + * this function returns, it is only guaranteed that for given BDI + * some IO is happening if we are over background dirty threshold. + * Caller need not hold sb s_umount semaphore. */ void bdi_start_background_writeback(struct backing_dev_info *bdi) { - __bdi_start_writeback(bdi, LONG_MAX, true, true); + /* + * We just wake up the flusher thread. It will perform background + * writeback as soon as there is no other work to do. 
+ */ + spin_lock_bh(&bdi->wb_lock); + _bdi_wakeup_flusher(bdi); + spin_unlock_bh(&bdi->wb_lock); } /* @@ -696,21 +709,32 @@ get_next_work_item(struct backing_dev_info *bdi) return work; } -static long wb_check_old_data_flush(struct bdi_writeback *wb) +static long wb_background_data_flush(struct bdi_writeback *wb) { unsigned long expired; long nr_pages; + long written = 0; + + if (over_bground_thresh()) { + struct wb_writeback_work work = { + .nr_pages = LONG_MAX, + .sync_mode = WB_SYNC_NONE, + .for_background = 1, + .range_cyclic = 1, + }; + written += wb_writeback(wb, &work); + } /* * When set to zero, disable periodic writeback */ if (!dirty_writeback_interval) - return 0; + return written; expired = wb->last_old_flush + msecs_to_jiffies(dirty_writeback_interval * 10); if (time_before(jiffies, expired)) - return 0; + return written; wb->last_old_flush = jiffies; nr_pages = global_page_state(NR_FILE_DIRTY) + @@ -725,10 +749,10 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb) .range_cyclic = 1, }; - return wb_writeback(wb, &work); + written += wb_writeback(wb, &work); } - return 0; + return written; } /* @@ -764,9 +788,9 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait) } /* - * Check for periodic writeback, kupdated() style + * Continue doing background writeout if needed */ - wrote += wb_check_old_data_flush(wb); + wrote += wb_background_data_flush(wb); clear_bit(BDI_writeback_running, &wb->bdi->state); return wrote; @@ -853,7 +877,7 @@ void wakeup_flusher_threads(long nr_pages) list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { if (!bdi_has_dirty_io(bdi)) continue; - __bdi_start_writeback(bdi, nr_pages, false, false); + __bdi_start_writeback(bdi, nr_pages, false); } rcu_read_unlock(); } -- 1.6.4.2