On Thu 11-11-10 15:14:27, Dmitry Monakhov wrote:
> Currently quota lists are global, which is very bad for scalability.
>  * inuse_lists -> sb->s_dquot->dq_inuse_list
>  * free_lists  -> sb->s_dquot->dq_free_list
>  * Add a per-sb lock for protecting the quota lists
> 
> dq_list_lock is not removed; it is now used only for protecting quota_hash.
> 
> Signed-off-by: Dmitry Monakhov <dmonakhov@xxxxxxxxxx>
> ---
>  fs/quota/dquot.c      |   88 +++++++++++++++++++++++++++++++++++++++---------
>  include/linux/quota.h |    3 ++
>  2 files changed, 74 insertions(+), 17 deletions(-)
> 
> diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
> index f719a6f..d7ec471 100644
> --- a/fs/quota/dquot.c
> +++ b/fs/quota/dquot.c
...
> @@ -335,17 +333,20 @@ static inline int mark_dquot_dirty(struct dquot *dquot)
>  int dquot_mark_dquot_dirty(struct dquot *dquot)
>  {
>          int ret = 1;
> +        struct quota_info *dqopt = sb_dqopts(dquot);
> 
>          /* If quota is dirty already, we don't have to acquire dq_list_lock */
>          if (test_bit(DQ_MOD_B, &dquot->dq_flags))
>                  return 1;
> 
>          spin_lock(&dq_list_lock);
> +        spin_lock(&dqopt->dq_list_lock);
>          if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
> -                list_add(&dquot->dq_dirty, &sb_dqopts(dquot)->
> -                                info[dquot->dq_type].dqi_dirty_list);
> +                list_add(&dquot->dq_dirty,
> +                         &dqopt->info[dquot->dq_type].dqi_dirty_list);
>                  ret = 0;
>          }
> +        spin_unlock(&dqopt->dq_list_lock);
>          spin_unlock(&dq_list_lock);
  OK, but the code above does nothing with the hash, so you could drop
dq_list_lock here immediately, couldn't you? Not that it matters much,
since you remove it eventually, but I'm curious...

>  /* Free unused dquots from cache */
> -static void prune_dqcache(int count)
> +static void prune_one_sb_dqcache(struct super_block *sb, void *arg)
>  {
>          struct list_head *head;
>          struct dquot *dquot;
> +        struct quota_info *dqopt = dqopts(sb);
> +        int count = *(int*) arg;
> 
> -        head = free_dquots.prev;
> -        while (head != &free_dquots && count) {
> +        mutex_lock(&dqctl(sb)->dqonoff_mutex);
  You cannot call mutex_lock() here because you already hold dq_list_lock,
taken in shrink_dqcache_memory(). If we could get away without the mutex
completely, that would be really welcome. This code can be called from page
reclaim with all sorts of locks possibly held, so if you cannot get rid of
dqonoff_mutex, you must bail out when the gfp_mask passed to the shrinker
does not have __GFP_FS set (which would be unfortunate).

> +        if (!sb_any_quota_loaded(sb)) {
> +                mutex_unlock(&dqctl(sb)->dqonoff_mutex);
> +                return;
> +        }
> +        spin_lock(&dqopt->dq_list_lock);
> +        head = dqopt->dq_free_list.prev;
> +        while (head != &dqopt->dq_free_list && count) {
>                  dquot = list_entry(head, struct dquot, dq_free);
>                  remove_dquot_hash(dquot);
>                  remove_free_dquot(dquot);
>                  remove_inuse(dquot);
>                  do_destroy_dquot(dquot);
>                  count--;
> -                head = free_dquots.prev;
> +                head = dqopt->dq_free_list.prev;
>          }
> +        spin_unlock(&dqopt->dq_list_lock);
> +        mutex_unlock(&dqctl(sb)->dqonoff_mutex);
> +}
> +static void prune_dqcache(int count)
> +{
> +        iterate_supers(prune_one_sb_dqcache, &count);
>  }
> -
>  /*
>   * This is called from kswapd when we think we need some
>   * more memory

								Honza
-- 
Jan Kara <jack@xxxxxxx>
SUSE Labs, CR
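
As an illustration of the bail-out Jan describes, below is a minimal sketch of
a quota shrinker that refuses to do filesystem work when the allocation
context forbids it. It assumes the three-argument shrinker callback signature
of kernels from that period (struct shrinker *, nr_to_scan, gfp_t) and reuses
the prune_dqcache(), dqstats and sysctl_vfs_cache_pressure names from
fs/quota/dquot.c; the return expression mirrors the pre-patch
shrink_dqcache_memory() and is an assumption, not part of Dmitry's patch.

static int shrink_dqcache_memory(struct shrinker *shrink, int nr_to_scan,
				 gfp_t gfp_mask)
{
	if (nr_to_scan) {
		/*
		 * We may be called from page reclaim with filesystem locks
		 * already held.  Taking dqonoff_mutex (or any other fs lock)
		 * here could deadlock, so only do real work when the
		 * allocation context allows recursion into the filesystem.
		 */
		if (!(gfp_mask & __GFP_FS))
			return -1;	/* cannot help the VM right now */
		prune_dqcache(nr_to_scan);
	}
	/* Report roughly how many dquots are freeable (assumed expression). */
	return (percpu_counter_read_positive(
			&dqstats.counter[DQST_FREE_DQUOTS]) / 100)
		* sysctl_vfs_cache_pressure;
}

An alternative that avoids giving up entirely would be to use mutex_trylock()
on dqonoff_mutex inside prune_one_sb_dqcache() and simply skip any superblock
whose mutex cannot be taken without blocking.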