Move dwork and last_old_flush from bdi_writeback structure directly into backing_dev_info. Separation between backing_dev_info and bdi_writeback is inconsistent if anything so let's keep only dirty tracking stuff in struct bdi_writeback. Also remove unused nr field when we are changing the structure. Signed-off-by: Jan Kara <jack@xxxxxxx> --- fs/fs-writeback.c | 16 ++++++++-------- include/linux/backing-dev.h | 8 ++++---- mm/backing-dev.c | 14 +++++++------- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 6ee9ee52e3de..47d106ae4879 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -95,7 +95,7 @@ static void bdi_wakeup_thread(struct backing_dev_info *bdi) { spin_lock_bh(&bdi->wb_lock); if (test_bit(BDI_registered, &bdi->state)) - mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0); + mod_delayed_work(bdi_wq, &bdi->dwork, 0); spin_unlock_bh(&bdi->wb_lock); } @@ -111,7 +111,7 @@ static void bdi_queue_work(struct backing_dev_info *bdi, goto out_unlock; } list_add_tail(&work->list, &bdi->work_list); - mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0); + mod_delayed_work(bdi_wq, &bdi->dwork, 0); out_unlock: spin_unlock_bh(&bdi->wb_lock); } @@ -848,12 +848,12 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb) if (!dirty_writeback_interval) return 0; - expired = wb->last_old_flush + + expired = wb->bdi->last_old_flush + msecs_to_jiffies(dirty_writeback_interval * 10); if (time_before(jiffies, expired)) return 0; - wb->last_old_flush = jiffies; + wb->bdi->last_old_flush = jiffies; nr_pages = get_nr_dirty_pages(); if (nr_pages) { @@ -913,9 +913,9 @@ static long wb_do_writeback(struct bdi_writeback *wb) */ void bdi_writeback_workfn(struct work_struct *work) { - struct bdi_writeback *wb = container_of(to_delayed_work(work), - struct bdi_writeback, dwork); - struct backing_dev_info *bdi = wb->bdi; + struct backing_dev_info *bdi = container_of(to_delayed_work(work), + struct backing_dev_info, dwork); + struct 
bdi_writeback *wb = &bdi->wb; long pages_written; set_worker_desc("flush-%s", dev_name(bdi->dev)); @@ -951,7 +951,7 @@ void bdi_writeback_workfn(struct work_struct *work) } if (!list_empty(&bdi->work_list)) - mod_delayed_work(bdi_wq, &wb->dwork, 0); + mod_delayed_work(bdi_wq, &bdi->dwork, 0); else if (wb_has_dirty_io(wb) && dirty_writeback_interval) bdi_wakeup_thread_delayed(bdi); diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index e488e9459a93..420750f5ed10 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -50,11 +50,7 @@ enum bdi_stat_item { struct bdi_writeback { struct backing_dev_info *bdi; /* our parent bdi */ - unsigned int nr; - unsigned long last_old_flush; /* last old data flush */ - - struct delayed_work dwork; /* work item used for writeback */ struct list_head b_dirty; /* dirty inodes */ struct list_head b_io; /* parked for writeback */ struct list_head b_more_io; /* parked for more writeback */ @@ -94,6 +90,10 @@ struct backing_dev_info { unsigned int min_ratio; unsigned int max_ratio, max_prop_frac; + unsigned long last_old_flush; /* last old data flush */ + + struct delayed_work dwork; /* work item used for writeback */ + struct bdi_writeback wb; /* default writeback info for this bdi */ spinlock_t wb_lock; /* protects work_list & wb.dwork scheduling */ diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 1706cbbdf5f0..c44ba43d580d 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -299,7 +299,7 @@ void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi) timeout = msecs_to_jiffies(dirty_writeback_interval * 10); spin_lock_bh(&bdi->wb_lock); if (test_bit(BDI_registered, &bdi->state)) - queue_delayed_work(bdi_wq, &bdi->wb.dwork, timeout); + queue_delayed_work(bdi_wq, &bdi->dwork, timeout); spin_unlock_bh(&bdi->wb_lock); } @@ -373,8 +373,8 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi) * @bdi->bdi_list is empty telling bdi_writeback_workfn() that @bdi * is dying and its 
work_list needs to be drained no matter what. */ - mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0); - flush_delayed_work(&bdi->wb.dwork); + mod_delayed_work(bdi_wq, &bdi->dwork, 0); + flush_delayed_work(&bdi->dwork); WARN_ON(!list_empty(&bdi->work_list)); /* @@ -382,7 +382,7 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi) * unflushed dirty IO after work_list is drained. Do it anyway * just in case. */ - cancel_delayed_work_sync(&bdi->wb.dwork); + cancel_delayed_work_sync(&bdi->dwork); } /* @@ -426,12 +426,10 @@ static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi) memset(wb, 0, sizeof(*wb)); wb->bdi = bdi; - wb->last_old_flush = jiffies; INIT_LIST_HEAD(&wb->b_dirty); INIT_LIST_HEAD(&wb->b_io); INIT_LIST_HEAD(&wb->b_more_io); spin_lock_init(&wb->list_lock); - INIT_DELAYED_WORK(&wb->dwork, bdi_writeback_workfn); } /* @@ -452,6 +450,8 @@ int bdi_init(struct backing_dev_info *bdi) INIT_LIST_HEAD(&bdi->bdi_list); INIT_LIST_HEAD(&bdi->work_list); + bdi->last_old_flush = jiffies; + INIT_DELAYED_WORK(&bdi->dwork, bdi_writeback_workfn); bdi_wb_init(&bdi->wb, bdi); for (i = 0; i < NR_BDI_STAT_ITEMS; i++) { @@ -508,7 +508,7 @@ void bdi_destroy(struct backing_dev_info *bdi) * could still be pending because bdi_prune_sb() can race with the * bdi_wakeup_thread_delayed() calls from __mark_inode_dirty(). */ - cancel_delayed_work_sync(&bdi->wb.dwork); + cancel_delayed_work_sync(&bdi->dwork); for (i = 0; i < NR_BDI_STAT_ITEMS; i++) percpu_counter_destroy(&bdi->bdi_stat[i]); -- 1.8.1.4 -- To unsubscribe from this list: send the line "unsubscribe linux-fsdevel" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html