The only place where we refill b_io is just before calling
writeback_inodes() (and that function has only one call site). Move the
refill into writeback_inodes(). This allows for easier separation when
transitioning to per-sb dirty tracking.

Signed-off-by: Jan Kara <jack@xxxxxxx>
---
 fs/fs-writeback.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index a3f37b128446..6caf55858dcb 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -563,7 +563,8 @@ static long writeback_chunk_size(struct backing_dev_info *bdi,
 }
 
 /*
- * Write inodes in b_io list belonging to @work->sb (if set).
+ * Refill b_io list if needed and start writing inodes on that list belonging
+ * to @work->sb (if set).
  *
  * Return the number of pages and/or inodes written.
  */
@@ -584,6 +585,9 @@ static long writeback_inodes(struct bdi_writeback *wb,
 	long write_chunk;
 	long wrote = 0;  /* count both pages and inodes */
 
+	spin_lock(&wb->list_lock);
+	if (list_empty(&wb->b_io))
+		queue_io(wb, work);
 	while (!list_empty(&wb->b_io)) {
 		struct inode *inode = wb_inode(wb->b_io.prev);
 
@@ -668,6 +672,8 @@ static long writeback_inodes(struct bdi_writeback *wb,
 			break;
 		}
 	}
+	spin_unlock(&wb->list_lock);
+
 	return wrote;
 }
 
@@ -764,15 +770,10 @@ static long bdi_writeback(struct backing_dev_info *bdi,
 		oldest_jif = jiffies;
 
 		trace_writeback_start(bdi, work);
-		spin_lock(&wb->list_lock);
-		if (list_empty(&wb->b_io))
-			queue_io(wb, work);
 		progress = writeback_inodes(wb, work);
-		spin_unlock(&wb->list_lock);
 		trace_writeback_written(bdi, work);
 
 		update_bandwidth(bdi, wb_start);
-
 		/*
 		 * Did we write something? Try for more
 		 *
-- 
1.8.1.4
--
To unsubscribe from this list: send the line "unsubscribe linux-fsdevel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html
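
The shape the patch converges on can also be modeled outside the kernel: the
consumer takes the list lock itself, refills its work list when it finds it
empty, drains it, and unlocks, so the caller no longer touches the lock or the
refill at all. The userspace C sketch below is only illustrative and makes
several assumptions: work_context, refill_io_list() and process_io_list() are
invented names, a plain array stands in for the b_io list, and a pthread mutex
stands in for wb->list_lock.

/*
 * Illustrative userspace model of the pattern in this patch: the consumer
 * (writeback_inodes() in the kernel) locks, refills its own work list when
 * empty, drains it, and unlocks, instead of relying on the caller
 * (bdi_writeback()) to do lock/refill/unlock around the call.
 * All names here are made up for the sketch.
 */
#include <pthread.h>
#include <stdio.h>

#define NR_ITEMS 4

struct work_context {
	pthread_mutex_t list_lock;	/* stands in for wb->list_lock */
	int b_io[NR_ITEMS];		/* stands in for the wb->b_io list */
	int nr_io;			/* number of queued entries */
	int next_tag;			/* source of new work, stands in for b_dirty */
};

/* Loosely models queue_io(): move pending work onto the I/O list. */
static void refill_io_list(struct work_context *ctx)
{
	while (ctx->nr_io < NR_ITEMS)
		ctx->b_io[ctx->nr_io++] = ctx->next_tag++;
}

/*
 * Loosely models writeback_inodes() after the patch: refill the list if
 * needed and process entries on it, all under the list lock taken here
 * rather than by the caller.
 */
static long process_io_list(struct work_context *ctx)
{
	long wrote = 0;

	pthread_mutex_lock(&ctx->list_lock);
	if (ctx->nr_io == 0)
		refill_io_list(ctx);
	while (ctx->nr_io > 0) {
		int item = ctx->b_io[--ctx->nr_io];

		printf("writing item %d\n", item);
		wrote++;
	}
	pthread_mutex_unlock(&ctx->list_lock);

	return wrote;
}

/* Loosely models the caller (bdi_writeback()): no locking or refill here. */
int main(void)
{
	struct work_context ctx = {
		.list_lock = PTHREAD_MUTEX_INITIALIZER,
	};
	long progress = process_io_list(&ctx);

	printf("wrote %ld items\n", progress);
	return 0;
}

The point of the pattern is the same as in the patch itself: the locking and
the refill live in one place, right next to the loop that consumes the list,
which should make it easier to change how the list is populated (e.g. per-sb
dirty tracking) without touching the caller.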