dq_list_lock is no longer responsible for any synchronization, get rid of it completely. Signed-off-by: Dmitry Monakhov <dmonakhov@xxxxxxxxxx> --- fs/quota/dquot.c | 36 ++---------------------------------- 1 files changed, 2 insertions(+), 34 deletions(-) diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 99dc7a3..2aa8faf 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -88,7 +88,8 @@ * in inode_add_bytes() and inode_sub_bytes(). * * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock, - * dq_list_lock > sb->s_dquot->dq_list_lock > hlist_bl_head + * dq_list_lock > hlist_bl_head + * Note that some things (eg. sb pointer, type, id) doesn't change during * the life of the dquot structure and so needn't to be protected by a lock * @@ -123,7 +124,6 @@ * i_mutex on quota files is special (it's below dqio_mutex) */ -static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock); static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_fmt_lock); __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock); EXPORT_SYMBOL(dq_data_lock); @@ -338,7 +338,6 @@ int dquot_mark_dquot_dirty(struct dquot *dquot) if (test_bit(DQ_MOD_B, &dquot->dq_flags)) return 1; - spin_lock(&dq_list_lock); spin_lock(&dqopt->dq_list_lock); if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) { list_add(&dquot->dq_dirty, @@ -346,7 +345,6 @@ int dquot_mark_dquot_dirty(struct dquot *dquot) ret = 0; } spin_unlock(&dqopt->dq_list_lock); - spin_unlock(&dq_list_lock); return ret; } EXPORT_SYMBOL(dquot_mark_dquot_dirty); @@ -438,15 +436,12 @@ int dquot_commit(struct dquot *dquot) struct quota_info *dqopt = sb_dqopts(dquot); mutex_lock(&dqopt->dqio_mutex); - spin_lock(&dq_list_lock); spin_lock(&dqopt->dq_list_lock); if (!clear_dquot_dirty(dquot)) { spin_unlock(&dqopt->dq_list_lock); - spin_unlock(&dq_list_lock); goto out_sem; } spin_unlock(&dqopt->dq_list_lock); - spin_unlock(&dq_list_lock); /* Inactive dquot can be only if there was error during read/init * => we have better not writing it 
*/ if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { @@ -518,7 +513,6 @@ static void invalidate_dquots(struct super_block *sb, int type) struct quota_info *dqopt = dqopts(sb); restart: - spin_lock(&dq_list_lock); spin_lock(&dqopt->dq_list_lock); list_for_each_entry_safe(dquot, tmp, &dqopt->dq_inuse_list, dq_inuse) { if (dquot->dq_sb != sb) @@ -533,7 +527,6 @@ restart: prepare_to_wait(&dquot->dq_wait_unused, &wait, TASK_UNINTERRUPTIBLE); spin_unlock(&dqopt->dq_list_lock); - spin_unlock(&dq_list_lock); /* Once dqput() wakes us up, we know it's time to free * the dquot. * IMPORTANT: we rely on the fact that there is always @@ -560,7 +553,6 @@ restart: do_destroy_dquot(dquot); } spin_unlock(&dqopt->dq_list_lock); - spin_unlock(&dq_list_lock); } /* Call callback for every active dquot on given filesystem */ @@ -574,7 +566,6 @@ int dquot_scan_active(struct super_block *sb, mutex_lock(&dqctl(sb)->dqonoff_mutex); dqopt = dqopts(sb); - spin_lock(&dq_list_lock); spin_lock(&dqopt->dq_list_lock); list_for_each_entry(dquot, &dqopt->dq_inuse_list, dq_inuse) { if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) @@ -584,20 +575,17 @@ int dquot_scan_active(struct super_block *sb, /* Now we have active dquot so we can just increase use count */ atomic_inc(&dquot->dq_count); spin_unlock(&dqopt->dq_list_lock); - spin_unlock(&dq_list_lock); dqstats_inc(DQST_LOOKUPS); dqput(old_dquot); old_dquot = dquot; ret = fn(dquot, priv); if (ret < 0) goto out; - spin_lock(&dq_list_lock); spin_lock(&dqopt->dq_list_lock); /* We are safe to continue now because our dquot could not * be moved out of the inuse list while we hold the reference */ } spin_unlock(&dqopt->dq_list_lock); - spin_unlock(&dq_list_lock); out: dqput(old_dquot); mutex_unlock(&dqctl(sb)->dqonoff_mutex); @@ -619,7 +607,6 @@ int dquot_quota_sync(struct super_block *sb, int type, int wait) continue; if (!sb_has_quota_active(sb, cnt)) continue; - spin_lock(&dq_list_lock); spin_lock(&dqopt->dq_list_lock); dirty = 
&dqopt->info[cnt].dqi_dirty_list; while (!list_empty(dirty)) { @@ -635,15 +622,12 @@ int dquot_quota_sync(struct super_block *sb, int type, int wait) * use count */ atomic_inc(&dquot->dq_count); spin_unlock(&dqopt->dq_list_lock); - spin_unlock(&dq_list_lock); dqstats_inc(DQST_LOOKUPS); dqctl(sb)->dq_op->write_dquot(dquot); dqput(dquot); spin_lock(&dqopt->dq_list_lock); - spin_lock(&dq_list_lock); } spin_unlock(&dqopt->dq_list_lock); - spin_unlock(&dq_list_lock); } for (cnt = 0; cnt < MAXQUOTAS; cnt++) @@ -723,9 +707,7 @@ static void prune_dqcache(int count) static int shrink_dqcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) { if (nr) { - spin_lock(&dq_list_lock); prune_dqcache(nr); - spin_unlock(&dq_list_lock); } return ((unsigned) percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]) @@ -758,7 +740,6 @@ void dqput(struct dquot *dquot) dqopt = sb_dqopts(dquot); dqstats_inc(DQST_DROPS); we_slept: - spin_lock(&dq_list_lock); spin_lock(&dqopt->dq_list_lock); if (atomic_read(&dquot->dq_count) > 1) { /* We have more than one user... nothing to do */ @@ -768,13 +749,11 @@ we_slept: atomic_read(&dquot->dq_count) == 1) wake_up(&dquot->dq_wait_unused); spin_unlock(&dqopt->dq_list_lock); - spin_unlock(&dq_list_lock); return; } /* Need to release dquot? 
*/ if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) { spin_unlock(&dqopt->dq_list_lock); - spin_unlock(&dq_list_lock); /* Commit dquot before releasing */ ret = dqctl(dquot->dq_sb)->dq_op->write_dquot(dquot); if (ret < 0) { @@ -785,11 +764,9 @@ we_slept: * We clear dirty bit anyway, so that we avoid * infinite loop here */ - spin_lock(&dq_list_lock); spin_lock(&dqopt->dq_list_lock); clear_dquot_dirty(dquot); spin_unlock(&dqopt->dq_list_lock); - spin_unlock(&dq_list_lock); } goto we_slept; } @@ -797,7 +774,6 @@ we_slept: clear_dquot_dirty(dquot); if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { spin_unlock(&dqopt->dq_list_lock); - spin_unlock(&dq_list_lock); dqctl(dquot->dq_sb)->dq_op->release_dquot(dquot); goto we_slept; } @@ -808,7 +784,6 @@ we_slept: #endif put_dquot_last(dquot); spin_unlock(&dqopt->dq_list_lock); - spin_unlock(&dq_list_lock); } EXPORT_SYMBOL(dqput); @@ -863,11 +838,9 @@ struct dquot *dqget(struct super_block *sb, unsigned int id, int type) idx = srcu_read_lock(&dqopt->dq_srcu); rcu_read_unlock(); we_slept: - spin_lock(&dq_list_lock); spin_lock(&dqopt->dq_list_lock); if (!sb_has_quota_active(sb, type)) { spin_unlock(&dqopt->dq_list_lock); - spin_unlock(&dq_list_lock); goto out; } hlist_bl_lock(blh); @@ -876,7 +849,6 @@ we_slept: if (!empty) { hlist_bl_unlock(blh); spin_unlock(&dqopt->dq_list_lock); - spin_unlock(&dq_list_lock); empty = get_empty_dquot(sb, type); if (!empty) schedule(); /* Try to wait for a moment... 
*/ @@ -891,7 +863,6 @@ we_slept: /* all dquots go on the inuse_list */ put_inuse(dquot); spin_unlock(&dqopt->dq_list_lock); - spin_unlock(&dq_list_lock); dqstats_inc(DQST_LOOKUPS); } else { if (!atomic_read(&dquot->dq_count)) @@ -899,7 +870,6 @@ we_slept: atomic_inc(&dquot->dq_count); hlist_bl_unlock(blh); spin_unlock(&dqopt->dq_list_lock); - spin_unlock(&dq_list_lock); dqstats_inc(DQST_CACHE_HITS); dqstats_inc(DQST_LOOKUPS); } @@ -1016,13 +986,11 @@ static int remove_inode_dquot_ref(struct inode *inode, int type, "dq_count %d to dispose list", atomic_read(&dquot->dq_count)); #endif - spin_lock(&dq_list_lock); spin_lock(&dqopt->dq_list_lock); /* As dquot must have currently users it can't be on * the free list... */ list_add(&dquot->dq_free, tofree_head); spin_unlock(&dqopt->dq_list_lock); - spin_unlock(&dq_list_lock); return 1; } else -- 1.6.5.2 -- To unsubscribe from this list: send the line "unsubscribe linux-fsdevel" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html