[PATCH 3/4] writeback: Replace several writeback lists with inode tagging

Currently the writeback code maintains three lists of dirty inodes -
the b_dirty list, the b_io list of inodes selected for writeback, and
the b_more_io list of inodes that need more writeback. Inodes on the
b_dirty list are sorted by their dirtied_when timestamp so that we can
easily select inodes for kupdate style writeback and also keep the
natural expectation that files written first should reach the disk
first.

This design, however, has the downside that when we need to move an
inode from the b_io list back to the b_dirty list (e.g. when writeback
of the inode is stalled on some lock), we have to set its dirtied_when
timestamp to the current time (finding the proper place in the b_dirty
list so that it stays sorted would be too time consuming). So we lose
the information about when the inode was actually dirtied.

In this patch we change the writeback code so that it maintains only a
single list of dirty inodes - the b_dirty list - sorted by the
dirtied_when timestamp. Inodes that still need to be considered for
writeback are identified by tagging them with the I_TO_WRITE flag. This
somewhat simplifies the logic and also allows us not to clobber the
dirtied_when timestamp.
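
For illustration, here is a minimal userspace sketch of the resulting
two-pass scheme (hypothetical stand-ins, not the kernel code: the real
tag_for_io() and writeback_inodes() below take list_lock/i_lock and
handle many more inode states):

  #include <stdio.h>

  #define I_DIRTY    (1 << 0)
  #define I_TO_WRITE (1 << 1)

  struct inode {
          unsigned long dirtied_when;  /* never clobbered by requeueing */
          unsigned int state;
          struct inode *next;          /* b_dirty, sorted oldest first */
  };

  /* Pass 1: tag expired inodes instead of moving them to a b_io list. */
  static int tag_for_io(struct inode *b_dirty, unsigned long older_than_this)
  {
          int tagged = 0;

          for (struct inode *i = b_dirty; i; i = i->next) {
                  if (i->dirtied_when > older_than_this)
                          break;  /* list is sorted; the rest is too new */
                  if (!(i->state & I_TO_WRITE)) {
                          i->state |= I_TO_WRITE;
                          tagged++;
                  }
          }
          return tagged;
  }

  /* Pass 2: write tagged inodes; dirtied_when stays untouched. */
  static void writeback_inodes(struct inode *b_dirty)
  {
          for (struct inode *i = b_dirty; i; i = i->next) {
                  if (!(i->state & I_TO_WRITE))
                          continue;
                  printf("writing inode dirtied at %lu\n", i->dirtied_when);
                  i->state &= ~(I_TO_WRITE | I_DIRTY);
          }
  }

  int main(void)
  {
          struct inode c = { 30, I_DIRTY, NULL };
          struct inode b = { 20, I_DIRTY, &c };
          struct inode a = { 10, I_DIRTY, &b };

          tag_for_io(&a, 25);     /* expire inodes dirtied at or before 25 */
          writeback_inodes(&a);   /* writes a and b, skips c */
          return 0;
  }

The key property is that skipping an inode only sets or clears the
I_TO_WRITE tag; its dirtied_when timestamp is never rewritten.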

Signed-off-by: Jan Kara <jack@xxxxxxx>
---
 fs/fs-writeback.c                | 273 +++++++++++++--------------------------
 include/linux/backing-dev.h      |   6 +-
 include/linux/fs.h               |   4 +
 include/trace/events/writeback.h |  27 ++--
 mm/backing-dev.c                 |  16 +--
 5 files changed, 116 insertions(+), 210 deletions(-)

diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index b54ec30541e6..6bf9c50ecd53 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -191,37 +191,6 @@ void inode_wb_list_del(struct inode *inode)
 	spin_unlock(&bdi->wb.list_lock);
 }
 
-/*
- * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
- * furthest end of its superblock's dirty-inode list.
- *
- * Before stamping the inode's ->dirtied_when, we check to see whether it is
- * already the most-recently-dirtied inode on the b_dirty list.  If that is
- * the case then the inode must have been redirtied while it was being written
- * out and we don't reset its dirtied_when.
- */
-static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
-{
-	assert_spin_locked(&wb->list_lock);
-	if (!list_empty(&wb->b_dirty)) {
-		struct inode *tail;
-
-		tail = wb_inode(wb->b_dirty.next);
-		if (time_before(inode->dirtied_when, tail->dirtied_when))
-			inode->dirtied_when = jiffies;
-	}
-	list_move(&inode->i_wb_list, &wb->b_dirty);
-}
-
-/*
- * requeue inode for re-scanning after bdi->b_io list is exhausted.
- */
-static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
-{
-	assert_spin_locked(&wb->list_lock);
-	list_move(&inode->i_wb_list, &wb->b_more_io);
-}
-
 static void inode_sync_complete(struct inode *inode)
 {
 	inode->i_state &= ~I_SYNC;
@@ -247,47 +216,25 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t)
 	return ret;
 }
 
-/*
- * Move expired (dirtied before work->older_than_this) dirty inodes from
- * @delaying_queue to @dispatch_queue.
- */
-static int move_expired_inodes(struct list_head *delaying_queue,
-			       struct list_head *dispatch_queue,
-			       struct wb_writeback_work *work)
+static void tag_for_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
 {
+	int tagged = 0;
 	struct inode *inode;
-	int moved = 0;
 
-	while (!list_empty(delaying_queue)) {
-		inode = wb_inode(delaying_queue->prev);
+	assert_spin_locked(&wb->list_lock);
+	list_for_each_entry(inode, &wb->b_dirty, i_wb_list) {
 		if (work->older_than_this &&
 		    inode_dirtied_after(inode, *work->older_than_this))
 			break;
-		list_move(&inode->i_wb_list, dispatch_queue);
-		moved++;
+		if ((!work->sb || inode->i_sb == work->sb) &&
+		    !(inode->i_state & I_TO_WRITE)) {
+			spin_lock(&inode->i_lock);
+			inode->i_state |= I_TO_WRITE;
+			spin_unlock(&inode->i_lock);
+			tagged++;
+		}
 	}
-out:
-	return moved;
-}
-
-/*
- * Queue all expired dirty inodes for io, eldest first.
- * Before
- *         newly dirtied     b_dirty    b_io    b_more_io
- *         =============>    gf         edc     BA
- * After
- *         newly dirtied     b_dirty    b_io    b_more_io
- *         =============>    g          fBAedc
- *                                           |
- *                                           +--> dequeue for IO
- */
-static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
-{
-	int moved;
-	assert_spin_locked(&wb->list_lock);
-	list_splice_init(&wb->b_more_io, &wb->b_io);
-	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
-	trace_writeback_queue_io(wb, work, moved);
+	trace_writeback_tag_for_io(wb, work, tagged);
 }
 
 static int write_inode(struct inode *inode, struct writeback_control *wbc)
@@ -353,66 +300,37 @@ static void inode_sleep_on_writeback(struct inode *inode)
 }
 
 /*
- * Find proper writeback list for the inode depending on its current state and
- * possibly also change of its state while we were doing writeback.  Here we
- * handle things such as livelock prevention or fairness of writeback among
- * inodes. This function can be called only by flusher thread - noone else
- * processes all inodes in writeback lists and requeueing inodes behind flusher
- * thread's back can have unexpected consequences.
+ * Decide whether we should try further writeback on the inode or exclude it
+ * from this writeback round. Also remove the inode from the dirty list if it
+ * has become clean.
  */
-static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
-			  struct writeback_control *wbc)
+static void requeue_inode(struct inode *inode, struct writeback_control *wbc)
 {
 	if (inode->i_state & I_FREEING)
-		return;
+		goto exclude;
 
-	/*
-	 * Sync livelock prevention. Each inode is tagged and synced in one
-	 * shot. If still dirty, it will be redirty_tail()'ed below.  Update
-	 * the dirty time to prevent enqueue and sync it again.
-	 */
-	if ((inode->i_state & I_DIRTY) &&
-	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
-		inode->dirtied_when = jiffies;
+	/* The inode is clean. Remove from writeback lists. */
+	if (!(inode->i_state & I_DIRTY)) {
+		list_del_init(&inode->i_wb_list);
+		goto exclude;
+	}
 
-	if (wbc->pages_skipped) {
-		/*
-		 * writeback is not making progress due to locked
-		 * buffers. Skip this inode for now.
-		 */
-		redirty_tail(inode, wb);
+	/* We used up our writeback chunk; keep the inode tagged for another try */
+	if (wbc->nr_to_write <= 0)
 		return;
-	}
 
-	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
-		/*
-		 * We didn't write back all the pages.  nfs_writepages()
-		 * sometimes bales out without doing anything.
-		 */
-		if (wbc->nr_to_write <= 0) {
-			/* Slice used up. Queue for next turn. */
-			requeue_io(inode, wb);
-		} else {
-			/*
-			 * Writeback blocked by something other than
-			 * congestion. Delay the inode for some time to
-			 * avoid spinning on the CPU (100% iowait)
-			 * retrying writeback of the dirty page/inode
-			 * that cannot be performed immediately.
-			 */
-			redirty_tail(inode, wb);
-		}
-	} else if (inode->i_state & I_DIRTY) {
-		/*
-		 * Filesystems can dirty the inode during writeback operations,
-		 * such as delayed allocation during submission or metadata
-		 * updates after data IO completion.
-		 */
-		redirty_tail(inode, wb);
-	} else {
-		/* The inode is clean. Remove from writeback lists. */
-		list_del_init(&inode->i_wb_list);
-	}
+	/*
+	 * In all the other cases we exclude inode from further writeback
+	 * in this writeback round. We know the inode is dirty although
+	 * we didn't use all of our writeback chunk. This means that
+	 * writeback was blocked for some reason or inode was redirtied.
+	 * In the first case we exclude inode from writeback to avoid
+	 * busylooping, in the second case to avoid livelocks. Note that
+	 * for_background and for_kupdate writeback will tag inodes again
+	 * after finishing one pass through the list.
+	 */
+exclude:
+	inode->i_state &= ~I_TO_WRITE;
 }
 
 /*
@@ -523,8 +441,10 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
 	 * If inode is clean, remove it from writeback lists. Otherwise don't
 	 * touch it. See comment above for explanation.
 	 */
-	if (!(inode->i_state & I_DIRTY))
+	if (!(inode->i_state & I_DIRTY)) {
+		inode->i_state &= ~I_TO_WRITE;
 		list_del_init(&inode->i_wb_list);
+	}
 	spin_unlock(&wb->list_lock);
 	inode_sync_complete(inode);
 out:
@@ -584,19 +504,24 @@ static long writeback_inodes(struct bdi_writeback *wb,
 	unsigned long start_time = jiffies;
 	long write_chunk;
 	long wrote = 0;  /* count both pages and inodes */
+	struct inode *inode, *next;
 
-	while (!list_empty(&wb->b_io)) {
-		struct inode *inode = wb_inode(wb->b_io.prev);
+restart:
+	/* We use list_safe_reset_next() to make the list iteration safe */
+	list_for_each_entry_safe(inode, next, &wb->b_dirty, i_wb_list) {
+		/* Don't bother scanning inodes that are too new */
+		if (work->older_than_this &&
+		    inode_dirtied_after(inode, *work->older_than_this))
+			break;
 
-		if (work->sb && inode->i_sb != work->sb) {
-			/*
-			 * We only want to write back data for this
-			 * superblock, move all inodes not belonging
-			 * to it back onto the dirty list.
-			 */
-			redirty_tail(inode, wb);
+		if (!(inode->i_state & I_TO_WRITE))
+			continue;
+		/*
+		 * Skip inodes on a different sb. They can remain tagged from
+		 * previous writeback rounds.
+		 */
+		if (work->sb && inode->i_sb != work->sb)
 			continue;
-		}
 
 		/*
 		 * Don't bother with new inodes or inodes being freed, first
@@ -605,22 +530,18 @@ static long writeback_inodes(struct bdi_writeback *wb,
 		 */
 		spin_lock(&inode->i_lock);
 		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
+			inode->i_state &= ~I_TO_WRITE;
 			spin_unlock(&inode->i_lock);
-			redirty_tail(inode, wb);
 			continue;
 		}
 		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
 			/*
 			 * If this inode is locked for writeback and we are not
-			 * doing writeback-for-data-integrity, move it to
-			 * b_more_io so that writeback can proceed with the
-			 * other inodes on s_io.
-			 *
-			 * We'll have another go at writing back this inode
-			 * when we completed a full scan of b_io.
+			 * doing writeback-for-data-integrity, exclude inode
+			 * from writeback.
 			 */
+			inode->i_state &= ~I_TO_WRITE;
 			spin_unlock(&inode->i_lock);
-			requeue_io(inode, wb);
 			trace_writeback_inodes_requeue(inode);
 			continue;
 		}
@@ -632,11 +553,17 @@ static long writeback_inodes(struct bdi_writeback *wb,
 		 * WB_SYNC_ALL case.
 		 */
 		if (inode->i_state & I_SYNC) {
+			trace_writeback_inodes_restart(inode);
 			/* Wait for I_SYNC. This function drops i_lock... */
 			inode_sleep_on_writeback(inode);
-			/* Inode may be gone, start again */
 			spin_lock(&wb->list_lock);
-			continue;
+			/*
+			 * Start again since the inode may be gone, so we have
+			 * no fixed point in the list to restart from. Luckily
+			 * this case is rare (it requires someone else to call
+			 * sync_inode() or a similar function).
+			 */
+			goto restart;
 		}
 		inode->i_state |= I_SYNC;
 		spin_unlock(&inode->i_lock);
@@ -657,10 +584,15 @@ static long writeback_inodes(struct bdi_writeback *wb,
 		spin_lock(&inode->i_lock);
 		if (!(inode->i_state & I_DIRTY))
 			wrote++;
-		requeue_inode(inode, wb, &wbc);
+		/*
+		 * Update next after retaking list_lock but before removing the
+		 * inode from the list in requeue_inode(). We are guaranteed the
+		 * current inode stays in the list because it has I_SYNC set.
+		 */
+		list_safe_reset_next(inode, next, i_wb_list);
+		requeue_inode(inode, &wbc);
 		inode_sync_complete(inode);
 		spin_unlock(&inode->i_lock);
-		cond_resched_lock(&wb->list_lock);
 		/*
 		 * bail out to wb_writeback() often enough to check
 		 * background threshold and other termination conditions.
@@ -686,8 +618,7 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
 	};
 
 	spin_lock(&wb->list_lock);
-	if (list_empty(&wb->b_io))
-		queue_io(wb, &work);
+	tag_for_io(wb, &work);
 	writeback_inodes(wb, &work);
 	spin_unlock(&wb->list_lock);
 
@@ -742,14 +673,21 @@ static long wb_writeback(struct bdi_writeback *wb,
 	unsigned long wb_start = jiffies;
 	long nr_pages = work->nr_pages;
 	unsigned long oldest_jif;
-	struct inode *inode;
 	long progress;
 
+	/*
+	 * We protect against writeback livelock from continuous file creation
+	 * by writing only inodes dirtied before writeback started. Background and
+	 * kupdate style writeback are handled in a special way below.
+	 */
 	oldest_jif = jiffies;
 	work->older_than_this = &oldest_jif;
 
 	spin_lock(&wb->list_lock);
-	for (;;) {
+	if (!work->for_background && !work->for_kupdate)
+		tag_for_io(wb, work);
+
+	do {
 		/*
 		 * Stop writeback when nr_pages has been consumed
 		 */
@@ -782,47 +720,18 @@ static long wb_writeback(struct bdi_writeback *wb,
 		if (work->for_kupdate) {
 			oldest_jif = jiffies -
 				msecs_to_jiffies(dirty_expire_interval * 10);
-		} else if (work->for_background)
+			tag_for_io(wb, work);
+		} else if (work->for_background) {
 			oldest_jif = jiffies;
+			tag_for_io(wb, work);
+		}
 
 		trace_writeback_start(wb->bdi, work);
-		if (list_empty(&wb->b_io))
-			queue_io(wb, work);
 		progress = writeback_inodes(wb, work);
 		trace_writeback_written(wb->bdi, work);
 
 		wb_update_bandwidth(wb, wb_start);
-
-		/*
-		 * Did we write something? Try for more
-		 *
-		 * Dirty inodes are moved to b_io for writeback in batches.
-		 * The completion of the current batch does not necessarily
-		 * mean the overall work is done. So we keep looping as long
-		 * as made some progress on cleaning pages or inodes.
-		 */
-		if (progress)
-			continue;
-		/*
-		 * No more inodes for IO, bail
-		 */
-		if (list_empty(&wb->b_more_io))
-			break;
-		/*
-		 * Nothing written. Wait for some inode to
-		 * become available for writeback. Otherwise
-		 * we'll just busyloop.
-		 */
-		if (!list_empty(&wb->b_more_io))  {
-			trace_writeback_wait(wb->bdi, work);
-			inode = wb_inode(wb->b_more_io.prev);
-			spin_lock(&inode->i_lock);
-			spin_unlock(&wb->list_lock);
-			/* This function drops i_lock... */
-			inode_sleep_on_writeback(inode);
-			spin_lock(&wb->list_lock);
-		}
-	}
+	} while (progress);
 	spin_unlock(&wb->list_lock);
 
 	return nr_pages - work->nr_pages;
@@ -1113,8 +1022,8 @@ void __mark_inode_dirty(struct inode *inode, int flags)
 			goto out_unlock_inode;
 
 		/*
-		 * If the inode was already on b_dirty/b_io/b_more_io, don't
-		 * reposition it (that would break b_dirty time-ordering).
+		 * If the inode was already on b_dirty, don't reposition it
+		 * (that would break b_dirty time-ordering).
 		 */
 		if (!was_dirty) {
 			bool wakeup_bdi = false;
@@ -1137,7 +1046,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
 			}
 
 			inode->dirtied_when = jiffies;
-			list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
+			list_move_tail(&inode->i_wb_list, &bdi->wb.b_dirty);
 			spin_unlock(&bdi->wb.list_lock);
 
 			if (wakeup_bdi)
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index e488e9459a93..897f6ff8c982 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -56,8 +56,6 @@ struct bdi_writeback {
 
 	struct delayed_work dwork;	/* work item used for writeback */
 	struct list_head b_dirty;	/* dirty inodes */
-	struct list_head b_io;		/* parked for writeback */
-	struct list_head b_more_io;	/* parked for more writeback */
 	spinlock_t list_lock;		/* protects the b_* lists */
 };
 
@@ -133,9 +131,7 @@ extern struct workqueue_struct *bdi_wq;
 
 static inline int wb_has_dirty_io(struct bdi_writeback *wb)
 {
-	return !list_empty(&wb->b_dirty) ||
-	       !list_empty(&wb->b_io) ||
-	       !list_empty(&wb->b_more_io);
+	return !list_empty(&wb->b_dirty);
 }
 
 static inline void __add_bdi_stat(struct backing_dev_info *bdi,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 878031227c57..99fcc1221c24 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1661,6 +1661,9 @@ struct super_operations {
  *
  * I_DIO_WAKEUP		Never set.  Only used as a key for wait_on_bit().
  *
+ * I_TO_WRITE		Used by the flusher thread to track which inodes need
+ *			writing in the current writeback round.
+ *
  * Q: What is the difference between I_WILL_FREE and I_FREEING?
  */
 #define I_DIRTY_SYNC		(1 << 0)
@@ -1677,6 +1680,7 @@ struct super_operations {
 #define __I_DIO_WAKEUP		9
 #define I_DIO_WAKEUP		(1 << I_DIO_WAKEUP)
 #define I_LINKABLE		(1 << 10)
+#define I_TO_WRITE		(1 << 11)
 
 #define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
 
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 9bf6f2da32d2..071eb2eea350 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -188,7 +188,6 @@ DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
 DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
 DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
 DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
-DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
 
 TRACE_EVENT(writeback_pages_written,
 	TP_PROTO(long pages_written),
@@ -275,16 +274,16 @@ DEFINE_EVENT(wbc_class, name, \
 	TP_ARGS(wbc, bdi))
 DEFINE_WBC_EVENT(wbc_writepage);
 
-TRACE_EVENT(writeback_queue_io,
+TRACE_EVENT(writeback_tag_for_io,
 	TP_PROTO(struct bdi_writeback *wb,
 		 struct wb_writeback_work *work,
-		 int moved),
-	TP_ARGS(wb, work, moved),
+		 int tagged),
+	TP_ARGS(wb, work, tagged),
 	TP_STRUCT__entry(
 		__array(char,		name, 32)
 		__field(unsigned long,	older)
 		__field(long,		age)
-		__field(int,		moved)
+		__field(int,		tagged)
 		__field(int,		reason)
 	),
 	TP_fast_assign(
@@ -293,14 +292,14 @@ TRACE_EVENT(writeback_queue_io,
 		__entry->older	= older_than_this ?  *older_than_this : 0;
 		__entry->age	= older_than_this ?
 				  (jiffies - *older_than_this) * 1000 / HZ : -1;
-		__entry->moved	= moved;
+		__entry->tagged	= tagged;
 		__entry->reason	= work->reason;
 	),
-	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s",
+	TP_printk("bdi %s: older=%lu age=%ld tagged=%d reason=%s",
 		__entry->name,
 		__entry->older,	/* older_than_this in jiffies */
 		__entry->age,	/* older_than_this in relative milliseconds */
-		__entry->moved,
+		__entry->tagged,
 		__print_symbolic(__entry->reason, WB_WORK_REASON)
 	)
 );
@@ -477,7 +476,7 @@ TRACE_EVENT(balance_dirty_pages,
 	  )
 );
 
-TRACE_EVENT(writeback_inodes_requeue,
+DECLARE_EVENT_CLASS(writeback_inodes_template,
 
 	TP_PROTO(struct inode *inode),
 	TP_ARGS(inode),
@@ -506,6 +505,16 @@ TRACE_EVENT(writeback_inodes_requeue,
 	)
 );
 
+DEFINE_EVENT(writeback_inodes_template, writeback_inodes_requeue,
+	TP_PROTO(struct inode *inode),
+	TP_ARGS(inode)
+);
+
+DEFINE_EVENT(writeback_inodes_template, writeback_inodes_restart,
+	TP_PROTO(struct inode *inode),
+	TP_ARGS(inode)
+);
+
 DECLARE_EVENT_CLASS(writeback_congest_waited_template,
 
 	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 09d9591b7708..322d704c6cfe 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -69,17 +69,13 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
 	unsigned long background_thresh;
 	unsigned long dirty_thresh;
 	unsigned long bdi_thresh;
-	unsigned long nr_dirty, nr_io, nr_more_io;
+	unsigned long nr_dirty;
 	struct inode *inode;
 
-	nr_dirty = nr_io = nr_more_io = 0;
+	nr_dirty = 0;
 	spin_lock(&wb->list_lock);
 	list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
 		nr_dirty++;
-	list_for_each_entry(inode, &wb->b_io, i_wb_list)
-		nr_io++;
-	list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
-		nr_more_io++;
 	spin_unlock(&wb->list_lock);
 
 	global_dirty_limits(&background_thresh, &dirty_thresh);
@@ -96,8 +92,6 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
 		   "BdiWritten:         %10lu kB\n"
 		   "BdiWriteBandwidth:  %10lu kBps\n"
 		   "b_dirty:            %10lu\n"
-		   "b_io:               %10lu\n"
-		   "b_more_io:          %10lu\n"
 		   "bdi_list:           %10u\n"
 		   "state:              %10lx\n",
 		   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
@@ -109,8 +103,6 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
 		   (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)),
 		   (unsigned long) K(bdi->write_bandwidth),
 		   nr_dirty,
-		   nr_io,
-		   nr_more_io,
 		   !list_empty(&bdi->bdi_list), bdi->state);
 #undef K
 
@@ -428,8 +420,6 @@ static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
 	wb->bdi = bdi;
 	wb->last_old_flush = jiffies;
 	INIT_LIST_HEAD(&wb->b_dirty);
-	INIT_LIST_HEAD(&wb->b_io);
-	INIT_LIST_HEAD(&wb->b_more_io);
 	spin_lock_init(&wb->list_lock);
 	INIT_DELAYED_WORK(&wb->dwork, bdi_writeback_workfn);
 }
@@ -495,8 +485,6 @@ void bdi_destroy(struct backing_dev_info *bdi)
 
 		bdi_lock_two(&bdi->wb, dst);
 		list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
-		list_splice(&bdi->wb.b_io, &dst->b_io);
-		list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
 		spin_unlock(&bdi->wb.list_lock);
 		spin_unlock(&dst->list_lock);
 	}
-- 
1.8.1.4
