Re: [PATCH v8 16/34] xfs: convert buftarg LRU to generic code

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On 05/25/2013 05:57 AM, Dave Chinner wrote:
> On Fri, May 24, 2013 at 03:59:10PM +0530, Glauber Costa wrote:
>> From: Dave Chinner <dchinner@xxxxxxxxxx>
>>
>> Convert the buftarg LRU to use the new generic LRU list and take
>> advantage of the functionality it supplies to make the buffer cache
>> shrinker node aware.
>>
>> * v7: Add NUMA aware flag
> 
> I know what is wrong with this patch that causes the unmount hang -
> it's the handling of the _XBF_LRU_DISPOSE flag no longer being
> modified atomically with the LRU lock. Hence there is a race where
> we can either lose the _XBF_LRU_DISPOSE or not see it and hence we
> can end up with code not detecting what list the buffer is on
> correctly.
> 
> I haven't had a chance to work out a fix for it yet. If this ends up
> likely to hold up the patch set, Glauber, then feel free to drop it
> from the series and I'll push a fixed version through the XFS tree
> in due course....
> 
> Cheers,
> 
> Dave.
> 
Please let me know what you think about the following two (very coarse)
patches. My idea is to expose more of the raw structures (the per-node
list_lru_node and its spinlock) so that XFS can take the LRU lock itself
when it needs to.

The memcg parts will need to be rebased on top of this. If you agree with
the approach, I will go ahead and do that.

diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index cf59a8a..95a73ea 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -40,8 +40,37 @@ struct list_lru {
 };
 
 int list_lru_init(struct list_lru *lru);
-int list_lru_add(struct list_lru *lru, struct list_head *item);
-int list_lru_del(struct list_lru *lru, struct list_head *item);
+
+struct list_lru_node *list_lru_to_node(struct list_lru *lru,
+				       struct list_head *item);
+int list_lru_add_node(struct list_lru *lru, struct list_lru_node *nlru,
+		      struct list_head *item);
+int list_lru_del_node(struct list_lru *lru, struct list_lru_node *nlru,
+		      struct list_head *item);
+
+static inline int list_lru_add(struct list_lru *lru, struct list_head *item)
+{
+	int ret;
+	struct list_lru_node *nlru = list_lru_to_node(lru, item);
+
+	spin_lock(&nlru->lock);
+	ret = list_lru_add_node(lru, nlru, item);
+	spin_unlock(&nlru->lock);
+
+	return ret;
+}
+
+static inline int list_lru_del(struct list_lru *lru, struct list_head *item)
+{
+	int ret;
+	struct list_lru_node *nlru = list_lru_to_node(lru, item);
+
+	spin_lock(&nlru->lock);
+	ret = list_lru_del_node(lru, nlru, item);
+	spin_unlock(&nlru->lock);
+
+	return ret;
+}
 
 unsigned long list_lru_count_node(struct list_lru *lru, int nid);
 static inline unsigned long list_lru_count(struct list_lru *lru)
diff --git a/lib/list_lru.c b/lib/list_lru.c
index dae13d6..f72029d 100644
--- a/lib/list_lru.c
+++ b/lib/list_lru.c
@@ -9,49 +9,53 @@
 #include <linux/mm.h>
 #include <linux/list_lru.h>
 
-int
-list_lru_add(
+struct list_lru_node *
+list_lru_to_node(
 	struct list_lru	*lru,
 	struct list_head *item)
 {
 	int nid = page_to_nid(virt_to_page(item));
-	struct list_lru_node *nlru = &lru->node[nid];
+	return &lru->node[nid];
+}
+EXPORT_SYMBOL_GPL(list_lru_to_node);
+
+int
+list_lru_add_node(
+	struct list_lru		*lru,
+	struct list_lru_node	*nlru,
+	struct list_head	*item)
+{
+	int nid = page_to_nid(virt_to_page(item));
 
-	spin_lock(&nlru->lock);
 	BUG_ON(nlru->nr_items < 0);
 	if (list_empty(item)) {
 		list_add_tail(item, &nlru->list);
 		if (nlru->nr_items++ == 0)
 			node_set(nid, lru->active_nodes);
-		spin_unlock(&nlru->lock);
 		return 1;
 	}
-	spin_unlock(&nlru->lock);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(list_lru_add);
+EXPORT_SYMBOL_GPL(list_lru_add_node);
 
 int
-list_lru_del(
-	struct list_lru	*lru,
-	struct list_head *item)
+list_lru_del_node(
+	struct list_lru		*lru,
+	struct list_lru_node	*nlru,
+	struct list_head	*item)
 {
 	int nid = page_to_nid(virt_to_page(item));
-	struct list_lru_node *nlru = &lru->node[nid];
 
-	spin_lock(&nlru->lock);
 	if (!list_empty(item)) {
 		list_del_init(item);
 		if (--nlru->nr_items == 0)
 			node_clear(nid, lru->active_nodes);
 		BUG_ON(nlru->nr_items < 0);
-		spin_unlock(&nlru->lock);
 		return 1;
 	}
-	spin_unlock(&nlru->lock);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(list_lru_del);
+EXPORT_SYMBOL_GPL(list_lru_del_node);
 
 unsigned long
 list_lru_count_node(struct list_lru *lru, int nid)
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index f7212c1..3e179f2 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -89,10 +89,15 @@ static void
 xfs_buf_lru_add(
 	struct xfs_buf	*bp)
 {
-	if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
+	struct list_lru_node *nlru;
+	nlru = list_lru_to_node(&bp->b_target->bt_lru, &bp->b_lru);
+
+	spin_lock(&nlru->lock);
+	if (list_lru_add_node(&bp->b_target->bt_lru, nlru, &bp->b_lru)) {
 		bp->b_lru_flags &= ~_XBF_LRU_DISPOSE;
 		atomic_inc(&bp->b_hold);
 	}
+	spin_unlock(&nlru->lock);
 }
 
 /*
@@ -122,6 +127,9 @@ void
 xfs_buf_stale(
 	struct xfs_buf	*bp)
 {
+	struct list_lru_node *nlru;
+	nlru = list_lru_to_node(&bp->b_target->bt_lru, &bp->b_lru);
+
 	ASSERT(xfs_buf_islocked(bp));
 
 	bp->b_flags |= XBF_STALE;
@@ -133,10 +141,12 @@ xfs_buf_stale(
 	 */
 	bp->b_flags &= ~_XBF_DELWRI_Q;
 
+	spin_lock(&nlru->lock);
 	atomic_set(&(bp)->b_lru_ref, 0);
 	if (!(bp->b_lru_flags & _XBF_LRU_DISPOSE) &&
-	    (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
+	    (list_lru_del_node(&bp->b_target->bt_lru, nlru, &bp->b_lru)))
 		atomic_dec(&bp->b_hold);
+	spin_unlock(&nlru->lock);
 
 	ASSERT(atomic_read(&bp->b_hold) >= 1);
 }

[Index of Archives]     [Linux Ext4 Filesystem]     [Union Filesystem]     [Filesystem Testing]     [Ceph Users]     [Ecryptfs]     [AutoFS]     [Kernel Newbies]     [Share Photos]     [Security]     [Netfilter]     [Bugtraq]     [Yosemite News]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Linux Cachefs]     [Reiser Filesystem]     [Linux RAID]     [Samba]     [Device Mapper]     [CEPH Development]
  Powered by Linux