[PATCH 19/19] quota: protect i_dquot with i_lock instead of dqptr_sem

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



dqptr_sem is currently one of the most contended locks:
each dquot_initialize and dquot_transfer results in down_write(dqptr_sem).
Let's use inode->i_lock to protect the i_dquot pointers. All users
which modify i_dquot are simply converted to that lock. But users
who held dqptr_sem for read (charge/uncharge methods) usually
look as follows:

down_read(&dqptr_sem)
___charge_quota()
make_quota_dirty(inode->i_dquot) --> may_sleep
up_read(&dqptr_sem)

We must drop i_lock before make_quota_dirty or flush_warnings;
to protect the dquot from being freed let's grab an extra reference,
and drop it after we are done with the dquot object.

Signed-off-by: Dmitry Monakhov <dmonakhov@xxxxxxxxxx>
---
 fs/quota/dquot.c         |  314 +++++++++++++++++----------------------------
 include/linux/quota.h    |    2 -
 include/linux/quotaops.h |    4 +-
 3 files changed, 121 insertions(+), 199 deletions(-)

diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index b2cf04d..de3990f 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -91,19 +91,17 @@
  * in inode_add_bytes() and inode_sub_bytes().
  *
  * The spinlock ordering is hence:
- * dq_data_lock > dq_lock > dq_list_lock > i_lock,
+ * dq_data_lock > i_lock > dq_lock > dq_list_lock
  * dq_list_lock > hlist_bl_head
  *
  * Note that some things (eg. sb pointer, type, id) doesn't change during
  * the life of the dquot structure and so needn't to be protected by a lock
  *
- * Any operation working on dquots via inode pointers must hold dqptr_sem.  If
- * operation is just reading pointers from inode (or not using them at all) the
- * read lock is enough. If pointers are altered function must hold write lock.
+ * Any operation working on dquots via inode pointers must hold i_lock.
  * Special care needs to be taken about S_NOQUOTA inode flag (marking that
  * inode is a quota file). Functions adding pointers from inode to dquots have
- * to check this flag under dqptr_sem and then (if S_NOQUOTA is not set) they
- * have to do all pointer modifications before dropping dqptr_sem. This makes
+ * to check this flag under i_lock and then (if S_NOQUOTA is not set) they
+ * have to do all pointer modifications before dropping i_lock. This makes
  * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
  * then drops all pointers to dquots from an inode.
  *
@@ -118,14 +116,7 @@
  * spinlock to internal buffers before writing.
  *
  * Lock ordering (including related VFS locks) is the following:
- *   i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_mutex >
- *   dqio_mutex
- * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
- * dqptr_sem. But filesystem has to count with the fact that functions such as
- * dquot_alloc_space() acquire dqptr_sem and they usually have to be called
- * from inside a transaction to keep filesystem consistency after a crash. Also
- * filesystems usually want to do some IO on dquot from ->mark_dirty which is
- * called with dqptr_sem held.
+ *   i_mutex > dqonoff_sem > journal_lock > dquot->dq_mutex > dqio_mutex
  * i_mutex on quota files is special (it's below dqio_mutex)
  */
 
@@ -778,7 +769,6 @@ static struct shrinker dqcache_shrinker = {
 
 /*
  * Put reference to dquot
- * NOTE: If you change this function please check whether dqput_blocks() works right...
  */
 void dqput(struct dquot *dquot)
 {
@@ -1019,50 +1009,6 @@ static void add_dquot_ref(struct super_block *sb, int type)
 }
 
 /*
- * Return 0 if dqput() won't block.
- * (note that 1 doesn't necessarily mean blocking)
- */
-static inline int dqput_blocks(struct dquot *dquot)
-{
-	if (atomic_read(&dquot->dq_count) <= 1)
-		return 1;
-	return 0;
-}
-
-/*
- * Remove references to dquots from inode and add dquot to list for freeing
- * if we have the last referece to dquot
- * We can't race with anybody because we hold dqptr_sem for writing...
- */
-static int remove_inode_dquot_ref(struct inode *inode, int type,
-				  struct list_head *tofree_head)
-{
-	struct dquot *dquot = inode->i_dquot[type];
-	struct quota_info *dqopt = dqopts(inode->i_sb);
-
-	inode->i_dquot[type] = NULL;
-	if (dquot) {
-		if (dqput_blocks(dquot)) {
-#ifdef CONFIG_QUOTA_DEBUG
-			if (atomic_read(&dquot->dq_count) != 1)
-				quota_error(inode->i_sb, "Adding dquot with "
-					    "dq_count %d to dispose list",
-					    atomic_read(&dquot->dq_count));
-#endif
-			spin_lock(&dqopt->dq_list_lock);
-			/* As dquot must have currently users it can't be on
-			 * the free list... */
-			list_add(&dquot->dq_free, tofree_head);
-			spin_unlock(&dqopt->dq_list_lock);
-			return 1;
-		}
-		else
-			dqput(dquot);   /* We have guaranteed we won't block */
-	}
-	return 0;
-}
-
-/*
  * Free list of dquots
  * Dquots are removed from inodes and no new references can be got so we are
  * the only ones holding reference
@@ -1087,20 +1033,35 @@ static void remove_dquot_ref(struct super_block *sb, int type,
 {
 	struct inode *inode;
 	int reserved = 0;
-
+	struct dquot *dquot;
 	spin_lock(&inode_lock);
 	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
 		/*
 		 *  We have to scan also I_NEW inodes because they can already
 		 *  have quota pointer initialized. Luckily, we need to touch
 		 *  only quota pointers and these have separate locking
-		 *  (dqptr_sem).
+		 *  (i_lock).
 		 */
+		spin_lock(&inode->i_lock);
 		if (!IS_NOQUOTA(inode)) {
-			if (unlikely(inode_get_rsv_space(inode) > 0))
+			if (unlikely(__inode_get_rsv_space(inode) > 0))
 				reserved = 1;
-			remove_inode_dquot_ref(inode, type, tofree_head);
+			dquot = inode->i_dquot[type];
+			inode->i_dquot[type] = NULL;
+			/*
+			 * As the dquot currently has users it can't be on
+			 * the free list, so we can use ->dq_free here.
+			 * If the dquot is already in the list then we have
+			 * already deferred one dqput() call, so dqput() cannot block
+			 */
+			if (dquot) {
+				if (list_empty(&dquot->dq_free))
+					list_add(&dquot->dq_free, tofree_head);
+				else
+					dqput(dquot);
+			}
 		}
+		spin_unlock(&inode->i_lock);
 	}
 	spin_unlock(&inode_lock);
 #ifdef CONFIG_QUOTA_DEBUG
@@ -1118,9 +1079,7 @@ static void drop_dquot_ref(struct super_block *sb, int type)
 	LIST_HEAD(tofree_head);
 
 	if (dqctl(sb)->dq_op) {
-		down_write(&dqopts(sb)->dqptr_sem);
 		remove_dquot_ref(sb, type, &tofree_head);
-		up_write(&dqopts(sb)->dqptr_sem);
 		put_dquot_list(&tofree_head);
 	}
 }
@@ -1434,29 +1393,21 @@ static int dquot_active(const struct inode *inode)
 /*
  * Initialize quota pointers in inode
  *
- * We do things in a bit complicated way but by that we avoid calling
- * find_get_dquot() and thus filesystem callbacks under dqptr_sem.
- *
  * It is better to call this function outside of any transaction as it
  * might need a lot of space in journal for dquot structure allocation.
  */
 static void __dquot_initialize(struct inode *inode, int type)
 {
 	unsigned int id = 0;
-	int cnt, idx;
+	int cnt;
 	struct dquot *got[MAXQUOTAS];
 	struct super_block *sb = inode->i_sb;
 	qsize_t rsv;
 
 	/* First test before acquiring mutex - solves deadlocks when we
          * re-enter the quota code and are already holding the mutex */
-	rcu_read_lock();
-	if (!dquot_active(inode)) {
-		rcu_read_unlock();
+	if (!dquot_active(inode))
 		return;
-	}
-	idx = srcu_read_lock(&dqopts(sb)->dq_srcu);
-	rcu_read_unlock();
 	/* First get references to structures we might need. */
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 		got[cnt] = NULL;
@@ -1473,7 +1424,8 @@ static void __dquot_initialize(struct inode *inode, int type)
 		got[cnt] = find_get_dquot(sb, id, cnt);
 	}
 
-	down_write(&dqopts(sb)->dqptr_sem);
+	spin_lock(&inode->i_lock);
+	rsv = __inode_get_rsv_space(inode);
 	if (IS_NOQUOTA(inode))
 		goto out_err;
 
@@ -1493,7 +1445,6 @@ static void __dquot_initialize(struct inode *inode, int type)
 			 * Make quota reservation system happy if someone
 			 * did a write before quota was turned on
 			 */
-			rsv = inode_get_rsv_space(inode);
 			if (unlikely(rsv)) {
 				spin_lock(&inode->i_dquot[cnt]->dq_lock);
 				dquot_resv_space(inode->i_dquot[cnt], rsv);
@@ -1502,8 +1453,7 @@ static void __dquot_initialize(struct inode *inode, int type)
 		}
 	}
 out_err:
-	up_write(&dqopts(sb)->dqptr_sem);
-	srcu_read_unlock(&dqopts(sb)->dq_srcu, idx);
+	spin_unlock(&inode->i_lock);
 	/* Drop unused references */
 	dqput_all(got);
 }
@@ -1535,12 +1485,12 @@ static void __dquot_drop(struct inode *inode)
 	int cnt;
 	struct dquot *put[MAXQUOTAS];
 
-	down_write(&dqopts(inode->i_sb)->dqptr_sem);
+	spin_lock(&inode->i_lock);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 		put[cnt] = inode->i_dquot[cnt];
 		inode->i_dquot[cnt] = NULL;
 	}
-	up_write(&dqopts(inode->i_sb)->dqptr_sem);
+	spin_unlock(&inode->i_lock);
 	dqput_all(put);
 }
 
@@ -1641,15 +1591,6 @@ static void __inode_incr_space(struct inode *inode, qsize_t number,
 		__inode_add_bytes(inode, number);
 }
 
-static void inode_incr_space(struct inode *inode, qsize_t number,
-				int reserve)
-{
-	spin_lock(&inode->i_lock);
-	__inode_incr_space(inode, number, reserve);
-	spin_unlock(&inode->i_lock);
-}
-
-
 static void __inode_decr_space(struct inode *inode, qsize_t number, int reserve)
 {
 	if (reserve)
@@ -1657,12 +1598,6 @@ static void __inode_decr_space(struct inode *inode, qsize_t number, int reserve)
 	else
 		__inode_sub_bytes(inode, number);
 }
-static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
-{
-	spin_lock(&inode->i_lock);
-	__inode_decr_space(inode, number, reserve);
-	spin_unlock(&inode->i_lock);
-}
 
 /*
  * This functions updates i_blocks+i_bytes fields and quota information
@@ -1679,25 +1614,19 @@ static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
  */
 int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
 {
-	int cnt, idx, ret = 0;
+	int cnt, ret = 0;
 	char warntype[MAXQUOTAS];
 	int warn = flags & DQUOT_SPACE_WARN;
 	int reserve = flags & DQUOT_SPACE_RESERVE;
 	int nofail = flags & DQUOT_SPACE_NOFAIL;
+	struct dquot *dquot[MAXQUOTAS] = {};
 
-	/*
-	 * First test before acquiring mutex - solves deadlocks when we
-	 * re-enter the quota code and are already holding the mutex
-	 */
-	rcu_read_lock();
+	spin_lock(&inode->i_lock);
 	if (!dquot_active(inode)) {
-		inode_incr_space(inode, number, reserve);
-		rcu_read_unlock();
+		__inode_incr_space(inode, number, reserve);
+		spin_unlock(&inode->i_lock);
 		goto out;
 	}
-	idx = srcu_read_lock(&dqopts(inode->i_sb)->dq_srcu);
-	rcu_read_unlock();
-	down_read(&dqopts(inode->i_sb)->dqptr_sem);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
 		warntype[cnt] = QUOTA_NL_NOWARN;
 
@@ -1705,10 +1634,13 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 		if (!inode->i_dquot[cnt])
 			continue;
+		dquot[cnt] = inode->i_dquot[cnt];
+		dqget(dquot[cnt]);
 		ret = check_bdq(inode->i_dquot[cnt], number, !warn,
 				warntype + cnt);
 		if (ret && !nofail) {
 			unlock_inode_dquots(inode->i_dquot);
+			spin_unlock(&inode->i_lock);
 			goto out_flush_warn;
 		}
 	}
@@ -1720,16 +1652,15 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
 		else
 			dquot_incr_space(inode->i_dquot[cnt], number);
 	}
-	inode_incr_space(inode, number, reserve);
+	__inode_incr_space(inode, number, reserve);
 	unlock_inode_dquots(inode->i_dquot);
-
-	if (reserve)
-		goto out_flush_warn;
-	mark_all_dquot_dirty(inode->i_dquot);
+	spin_unlock(&inode->i_lock);
+	if (!reserve)
+		mark_all_dquot_dirty(dquot);
 out_flush_warn:
-	flush_warnings(inode->i_dquot, warntype);
-	up_read(&dqopts(inode->i_sb)->dqptr_sem);
-	srcu_read_unlock(&dqopts(inode->i_sb)->dq_srcu, idx);
+
+	flush_warnings(dquot, warntype);
+	dqput_all(dquot);
 out:
 	return ret;
 }
@@ -1738,28 +1669,26 @@ EXPORT_SYMBOL(__dquot_alloc_space);
 /*
  * This operation can block, but only after everything is updated
  */
-int dquot_alloc_inode(const struct inode *inode)
+int dquot_alloc_inode(struct inode *inode)
 {
-	int cnt, idx, ret = 0;
+	int cnt, ret = 0;
 	char warntype[MAXQUOTAS];
+	struct dquot *dquot[MAXQUOTAS] = {};
 
-	/* First test before acquiring mutex - solves deadlocks when we
-         * re-enter the quota code and are already holding the mutex */
-	rcu_read_lock();
+	spin_lock(&inode->i_lock);
 	if (!dquot_active(inode)) {
-		rcu_read_unlock();
+		spin_unlock(&inode->i_lock);
 		return 0;
 	}
-	idx = srcu_read_lock(&dqopts(inode->i_sb)->dq_srcu);
-	rcu_read_unlock();
-
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
 		warntype[cnt] = QUOTA_NL_NOWARN;
-	down_read(&dqopts(inode->i_sb)->dqptr_sem);
+
 	lock_inode_dquots(inode->i_dquot);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 		if (!inode->i_dquot[cnt])
 			continue;
+		dquot[cnt] = inode->i_dquot[cnt];
+		dqget(dquot[cnt]);
 		ret = check_idq(inode->i_dquot[cnt], 1, warntype + cnt);
 		if (ret)
 			goto warn_put_all;
@@ -1773,11 +1702,11 @@ int dquot_alloc_inode(const struct inode *inode)
 
 warn_put_all:
 	unlock_inode_dquots(inode->i_dquot);
+	spin_unlock(&inode->i_lock);
 	if (ret == 0)
-		mark_all_dquot_dirty(inode->i_dquot);
-	flush_warnings(inode->i_dquot, warntype);
-	up_read(&dqopts(inode->i_sb)->dqptr_sem);
-	srcu_read_unlock(&dqopts(inode->i_sb)->dq_srcu, idx);
+		mark_all_dquot_dirty(dquot);
+	flush_warnings(dquot, warntype);
+	dqput_all(dquot);
 	return ret;
 }
 EXPORT_SYMBOL(dquot_alloc_inode);
@@ -1787,30 +1716,30 @@ EXPORT_SYMBOL(dquot_alloc_inode);
  */
 int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
 {
-	int cnt, idx;
+	int cnt;
+	struct dquot *dquot[MAXQUOTAS] = {};
 
-	rcu_read_lock();
+	spin_lock(&inode->i_lock);
 	if (!dquot_active(inode)) {
-		inode_claim_rsv_space(inode, number);
-		rcu_read_unlock();
+		__inode_claim_rsv_space(inode, number);
+		spin_unlock(&inode->i_lock);
 		return 0;
 	}
-	idx = srcu_read_lock(&dqopts(inode->i_sb)->dq_srcu);
-	rcu_read_unlock();
-	down_read(&dqopts(inode->i_sb)->dqptr_sem);
 	lock_inode_dquots(inode->i_dquot);
 	/* Claim reserved quotas to allocated quotas */
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
-		if (inode->i_dquot[cnt])
-			dquot_claim_reserved_space(inode->i_dquot[cnt],
-							number);
+		if (inode->i_dquot[cnt]) {
+			dquot[cnt] = inode->i_dquot[cnt];
+			dqget(dquot[cnt]);
+			dquot_claim_reserved_space(inode->i_dquot[cnt], number);
+		}
 	}
 	/* Update inode bytes */
-	inode_claim_rsv_space(inode, number);
+	__inode_claim_rsv_space(inode, number);
 	unlock_inode_dquots(inode->i_dquot);
-	mark_all_dquot_dirty(inode->i_dquot);
-	up_read(&dqopts(inode->i_sb)->dqptr_sem);
-	srcu_read_unlock(&dqopts(inode->i_sb)->dq_srcu, idx);
+	spin_unlock(&inode->i_lock);
+	mark_all_dquot_dirty(dquot);
+	dqput_all(dquot);
 	return 0;
 }
 EXPORT_SYMBOL(dquot_claim_space_nodirty);
@@ -1820,75 +1749,68 @@ EXPORT_SYMBOL(dquot_claim_space_nodirty);
  */
 void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
 {
-	unsigned int cnt, idx;
+	unsigned int cnt;
 	char warntype[MAXQUOTAS];
 	int reserve = flags & DQUOT_SPACE_RESERVE;
+	struct dquot *dquot[MAXQUOTAS] = {};
 
-	/* First test before acquiring mutex - solves deadlocks when we
-         * re-enter the quota code and are already holding the mutex */
-	rcu_read_lock();
+	spin_lock(&inode->i_lock);
 	if (!dquot_active(inode)) {
-		inode_decr_space(inode, number, reserve);
-		rcu_read_unlock();
+		__inode_decr_space(inode, number, reserve);
+		spin_unlock(&inode->i_lock);
 		return;
 	}
-
-	idx = srcu_read_lock(&dqopts(inode->i_sb)->dq_srcu);
-	rcu_read_unlock();
-	down_read(&dqopts(inode->i_sb)->dqptr_sem);
 	lock_inode_dquots(inode->i_dquot);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 		if (!inode->i_dquot[cnt])
 			continue;
+		dquot[cnt] = inode->i_dquot[cnt];
+		dqget(dquot[cnt]);
 		warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
 		if (reserve)
 			dquot_free_reserved_space(inode->i_dquot[cnt], number);
 		else
 			dquot_decr_space(inode->i_dquot[cnt], number);
 	}
-	inode_decr_space(inode, number, reserve);
+	__inode_decr_space(inode, number, reserve);
 	unlock_inode_dquots(inode->i_dquot);
-
-	if (reserve)
-		goto out_unlock;
-	mark_all_dquot_dirty(inode->i_dquot);
-out_unlock:
-	flush_warnings(inode->i_dquot, warntype);
-	up_read(&dqopts(inode->i_sb)->dqptr_sem);
-	srcu_read_unlock(&dqopts(inode->i_sb)->dq_srcu, idx);
+	spin_unlock(&inode->i_lock);
+	if (!reserve)
+		mark_all_dquot_dirty(dquot);
+	flush_warnings(dquot, warntype);
+	dqput_all(dquot);
 }
 EXPORT_SYMBOL(__dquot_free_space);
 
 /*
  * This operation can block, but only after everything is updated
  */
-void dquot_free_inode(const struct inode *inode)
+void dquot_free_inode(struct inode *inode)
 {
-	unsigned int cnt, idx;
+	unsigned int cnt;
 	char warntype[MAXQUOTAS];
+	struct dquot *dquot[MAXQUOTAS] = {};
 
-	/* First test before acquiring mutex - solves deadlocks when we
-         * re-enter the quota code and are already holding the mutex */
-	rcu_read_lock();
+	spin_lock(&inode->i_lock);
 	if (!dquot_active(inode)) {
-		rcu_read_unlock();
+		spin_unlock(&inode->i_lock);
 		return;
 	}
-	idx = srcu_read_lock(&dqopts(inode->i_sb)->dq_srcu);
-	rcu_read_unlock();
-	down_read(&dqopts(inode->i_sb)->dqptr_sem);
+
 	lock_inode_dquots(inode->i_dquot);
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 		if (!inode->i_dquot[cnt])
 			continue;
+		dquot[cnt] = inode->i_dquot[cnt];
+		dqget(dquot[cnt]);
 		warntype[cnt] = info_idq_free(inode->i_dquot[cnt], 1);
 		dquot_decr_inodes(inode->i_dquot[cnt], 1);
 	}
 	unlock_inode_dquots(inode->i_dquot);
-	mark_all_dquot_dirty(inode->i_dquot);
-	flush_warnings(inode->i_dquot, warntype);
-	up_read(&dqopts(inode->i_sb)->dqptr_sem);
-	srcu_read_unlock(&dqopts(inode->i_sb)->dq_srcu, idx);
+	spin_unlock(&inode->i_lock);
+	mark_all_dquot_dirty(dquot);
+	flush_warnings(dquot, warntype);
+	dqput_all(dquot);
 }
 EXPORT_SYMBOL(dquot_free_inode);
 
@@ -1907,36 +1829,27 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 	qsize_t space, cur_space;
 	qsize_t rsv_space = 0;
 	struct dquot *transfer_from[MAXQUOTAS] = {};
-	int cnt, idx, ret = 0;
+	int cnt, ret = 0;
 	char is_valid[MAXQUOTAS] = {};
 	char warntype_to[MAXQUOTAS];
 	char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS];
 
-	/* First test before acquiring mutex - solves deadlocks when we
-         * re-enter the quota code and are already holding the mutex */
-	rcu_read_lock();
+	spin_lock(&inode->i_lock);
 	if (!dquot_active(inode)) {
-		rcu_read_unlock();
+		spin_unlock(&inode->i_lock);
 		return 0;
 	}
 	/* Initialize the arrays */
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
 		warntype_to[cnt] = QUOTA_NL_NOWARN;
 
-	idx = srcu_read_lock(&dqopts(inode->i_sb)->dq_srcu);
-	rcu_read_unlock();
-	down_write(&dqopts(inode->i_sb)->dqptr_sem);
-	if (IS_NOQUOTA(inode)) {	/* File without quota accounting? */
-		up_write(&dqopts(inode->i_sb)->dqptr_sem);
-		srcu_read_unlock(&dqopts(inode->i_sb)->dq_srcu, idx);
-		return 0;
-	}
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 		/*
 		 * Skip changes for same uid or gid or for turned off quota-type.
 		 */
 		if (!transfer_to[cnt])
 			continue;
+		dqget(transfer_to[cnt]);
 		/* Avoid races with quotaoff() */
 		if (!sb_has_quota_active(inode->i_sb, cnt))
 			continue;
@@ -1946,10 +1859,13 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 			continue;
 		is_valid[cnt] = 1;
 		transfer_from[cnt] = inode->i_dquot[cnt];
+		if (transfer_from[cnt])
+			dqget(transfer_from[cnt]);
+
 	}
 	lock_dquot_double(transfer_from, transfer_to);
-	cur_space = inode_get_bytes(inode);
-	rsv_space = inode_get_rsv_space(inode);
+	cur_space = __inode_get_bytes(inode);
+	rsv_space = __inode_get_rsv_space(inode);
 	space = cur_space + rsv_space;
 	/* Build the transfer_from list and check the limits */
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1989,13 +1905,15 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 	}
 	unlock_inode_dquots(transfer_to);
 	unlock_inode_dquots(transfer_from);
-	up_write(&dqopts(inode->i_sb)->dqptr_sem);
-	srcu_read_unlock(&dqopts(inode->i_sb)->dq_srcu, idx);
+	spin_unlock(&inode->i_lock);
+
 	mark_all_dquot_dirty(transfer_from);
 	mark_all_dquot_dirty(transfer_to);
 	flush_warnings(transfer_to, warntype_to);
 	flush_warnings(transfer_from, warntype_from_inodes);
 	flush_warnings(transfer_from, warntype_from_space);
+	dqput_all(transfer_to);
+	dqput_all(transfer_from);
 	/* Pass back references to put */
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
 		if (is_valid[cnt])
@@ -2004,9 +1922,10 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
 over_quota:
 	unlock_inode_dquots(transfer_to);
 	unlock_inode_dquots(transfer_from);
-	up_write(&dqopts(inode->i_sb)->dqptr_sem);
-	srcu_read_unlock(&dqopts(inode->i_sb)->dq_srcu, idx);
+	spin_unlock(&inode->i_lock);
 	flush_warnings(transfer_to, warntype_to);
+	dqput_all(transfer_to);
+	dqput_all(transfer_from);
 	return ret;
 }
 EXPORT_SYMBOL(__dquot_transfer);
@@ -2109,7 +2028,6 @@ static int alloc_quota_info(struct quota_ctl_info *dqctl) {
 		return err;
 	}
 	mutex_init(&dqopt->dqio_mutex);
-	init_rwsem(&dqopt->dqptr_sem);
 	spin_lock_init(&dqopt->dq_list_lock);
 	spin_lock_init(&dqopt->dq_data_lock);
 	INIT_LIST_HEAD(&dqopt->dq_inuse_list);
@@ -2247,8 +2165,10 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
 			if (!sb_has_quota_loaded(sb, cnt)) {
 				mutex_lock_nested(&toputinode[cnt]->i_mutex,
 						  I_MUTEX_QUOTA);
+				spin_lock(&toputinode[cnt]->i_lock);
 				toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
 				  S_NOATIME | S_NOQUOTA);
+				spin_unlock(&toputinode[cnt]->i_lock);
 				truncate_inode_pages(&toputinode[cnt]->i_data,
 						     0);
 				mutex_unlock(&toputinode[cnt]->i_mutex);
@@ -2343,9 +2263,11 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
 		 * possible) Also nobody should write to the file - we use
 		 * special IO operations which ignore the immutable bit. */
 		mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
+		spin_lock(&inode->i_lock);
 		oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
 					     S_NOQUOTA);
 		inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
+		spin_unlock(&inode->i_lock);
 		mutex_unlock(&inode->i_mutex);
 		/*
 		 * When S_NOQUOTA is set, remove dquot references as no more
@@ -2387,8 +2309,10 @@ out_lock:
 		mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
 		/* Set the flags back (in the case of accidental quotaon()
 		 * on a wrong file we don't want to mess up the flags) */
+		spin_lock(&inode->i_lock);
 		inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
 		inode->i_flags |= oldflags;
+		spin_unlock(&inode->i_lock);
 		mutex_unlock(&inode->i_mutex);
 	}
 	/* We have failed to enable quota, so quota flags doesn't changed.
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 949347a..f39a756 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -427,8 +427,6 @@ struct quota_info {
 	struct inode *files[MAXQUOTAS];	/* inodes of quotafiles */
 	const struct quota_format_ops *fmt_ops[MAXQUOTAS];	/* Operations for each type */
 	struct srcu_struct dq_srcu;	/* use count read lock */
-	struct rw_semaphore dqptr_sem;	/* serialize ops using quota_info struct, pointers from inode to dquots */
-
 };
 
 int register_quota_format(struct quota_format_type *fmt);
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 93e39c6..c19a904 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -64,10 +64,10 @@ void dquot_destroy(struct dquot *dquot);
 int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags);
 void __dquot_free_space(struct inode *inode, qsize_t number, int flags);
 
-int dquot_alloc_inode(const struct inode *inode);
+int dquot_alloc_inode(struct inode *inode);
 
 int dquot_claim_space_nodirty(struct inode *inode, qsize_t number);
-void dquot_free_inode(const struct inode *inode);
+void dquot_free_inode(struct inode *inode);
 
 int dquot_disable(struct super_block *sb, int type, unsigned int flags);
 /* Suspend quotas on remount RO */
-- 
1.6.5.2

--
To unsubscribe from this list: send the line "unsubscribe linux-fsdevel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[Index of Archives]     [Linux Ext4 Filesystem]     [Union Filesystem]     [Filesystem Testing]     [Ceph Users]     [Ecryptfs]     [AutoFS]     [Kernel Newbies]     [Share Photos]     [Security]     [Netfilter]     [Bugtraq]     [Yosemite News]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Linux Cachefs]     [Reiser Filesystem]     [Linux RAID]     [Samba]     [Device Mapper]     [CEPH Development]
  Powered by Linux