The generic percpu counter has some memory overhead, but it is negligible on modern systems, and embedded systems compile without quota support anyway. And code reuse is always a good thing. This patch should fix the complaint from preemptible kernels that was introduced by dde9588853b1bde
---
 fs/quota/dquot.c      |   60 +++++++++++++++---------------------------------
 include/linux/quota.h |   28 ++++------------------
 2 files changed, 24 insertions(+), 64 deletions(-)

diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 655a4c5..2b54548 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -228,10 +228,6 @@ static struct hlist_head *dquot_hash;
 
 struct dqstats dqstats;
 EXPORT_SYMBOL(dqstats);
-#ifdef CONFIG_SMP
-struct dqstats *dqstats_pcpu;
-EXPORT_SYMBOL(dqstats_pcpu);
-#endif
 
 static qsize_t inode_get_rsv_space(struct inode *inode);
 static void __dquot_initialize(struct inode *inode, int type);
@@ -676,22 +672,6 @@ static void prune_dqcache(int count)
 	}
 }
 
-static int dqstats_read(unsigned int type)
-{
-	int count = 0;
-#ifdef CONFIG_SMP
-	int cpu;
-	for_each_possible_cpu(cpu)
-		count += per_cpu_ptr(dqstats_pcpu, cpu)->stat[type];
-	/* Statistics reading is racy, but absolute accuracy isn't required */
-	if (count < 0)
-		count = 0;
-#else
-	count = dqstats.stat[type];
-#endif
-	return count;
-}
-
 /*
  * This is called from kswapd when we think we need some
  * more memory
@@ -704,7 +684,8 @@ static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
 		prune_dqcache(nr);
 		spin_unlock(&dq_list_lock);
 	}
-	return (dqstats_read(DQST_FREE_DQUOTS)/100) * sysctl_vfs_cache_pressure;
+	return (percpu_counter_sum_positive(dqstats.cntp + DQST_FREE_DQUOTS) /
+		100) * sysctl_vfs_cache_pressure;
 }
 
 static struct shrinker dqcache_shrinker = {
@@ -2509,68 +2490,66 @@ const struct quotactl_ops vfs_quotactl_ops = {
 static int do_proc_dqstats(struct ctl_table *table, int write,
 		     void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-#ifdef CONFIG_SMP
 	/* Update global table */
-	unsigned int type = (int *)table->data - dqstats.stat;
-	dqstats.stat[type] = dqstats_read(type);
-#endif
-	return proc_dointvec(table, write, buffer, lenp, ppos);
+	unsigned int type = (s64 *)table->data - dqstats.stat;
+	dqstats.stat[type] = percpu_counter_sum_positive(dqstats.cntp + type);
+	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 }
 
 static ctl_table fs_dqstats_table[] = {
 	{
 		.procname	= "lookups",
 		.data		= &dqstats.stat[DQST_LOOKUPS],
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(s64),
 		.mode		= 0444,
 		.proc_handler	= do_proc_dqstats,
 	},
 	{
 		.procname	= "drops",
 		.data		= &dqstats.stat[DQST_DROPS],
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(s64),
 		.mode		= 0444,
 		.proc_handler	= do_proc_dqstats,
 	},
 	{
 		.procname	= "reads",
 		.data		= &dqstats.stat[DQST_READS],
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(s64),
 		.mode		= 0444,
 		.proc_handler	= do_proc_dqstats,
 	},
 	{
 		.procname	= "writes",
 		.data		= &dqstats.stat[DQST_WRITES],
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(s64),
 		.mode		= 0444,
 		.proc_handler	= do_proc_dqstats,
 	},
 	{
 		.procname	= "cache_hits",
 		.data		= &dqstats.stat[DQST_CACHE_HITS],
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(s64),
 		.mode		= 0444,
 		.proc_handler	= do_proc_dqstats,
 	},
 	{
 		.procname	= "allocated_dquots",
 		.data		= &dqstats.stat[DQST_ALLOC_DQUOTS],
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(s64),
 		.mode		= 0444,
 		.proc_handler	= do_proc_dqstats,
 	},
 	{
 		.procname	= "free_dquots",
 		.data		= &dqstats.stat[DQST_FREE_DQUOTS],
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(s64),
 		.mode		= 0444,
 		.proc_handler	= do_proc_dqstats,
 	},
 	{
 		.procname	= "syncs",
 		.data		= &dqstats.stat[DQST_SYNCS],
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(s64),
 		.mode		= 0444,
 		.proc_handler	= do_proc_dqstats,
 	},
@@ -2606,7 +2585,7 @@ static ctl_table sys_table[] = {
 
 static int __init dquot_init(void)
 {
-	int i;
+	int i, ret;
 	unsigned long nr_hash, order;
 
 	printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);
@@ -2624,12 +2603,11 @@ static int __init dquot_init(void)
 	if (!dquot_hash)
 		panic("Cannot create dquot hash table");
 
-#ifdef CONFIG_SMP
-	dqstats_pcpu = alloc_percpu(struct dqstats);
-	if (!dqstats_pcpu)
-		panic("Cannot create dquot stats table");
-#endif
-	memset(&dqstats, 0, sizeof(struct dqstats));
+	for(i = 0; i < _DQST_DQSTAT_LAST; i++) {
+		ret = percpu_counter_init(dqstats.cntp + i, 0);
+		if (ret)
+			panic("Cannot create dquot stat counters");
+	}
 
 	/* Find power-of-two hlist_heads which can fit into allocation */
 	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 7126a15..edd57d9 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -174,8 +174,7 @@ enum {
 #include <linux/rwsem.h>
 #include <linux/spinlock.h>
 #include <linux/wait.h>
-#include <linux/percpu.h>
-#include <linux/smp.h>
+#include <linux/percpu_counter.h>
 
 #include <linux/dqblk_xfs.h>
 #include <linux/dqblk_v1.h>
@@ -253,29 +252,12 @@ enum {
 };
 
 struct dqstats {
-	int stat[_DQST_DQSTAT_LAST];
+	struct percpu_counter cntp[_DQST_DQSTAT_LAST];
+	s64 stat[_DQST_DQSTAT_LAST];
 };
-
-extern struct dqstats *dqstats_pcpu;
 extern struct dqstats dqstats;
-
-static inline void dqstats_inc(unsigned int type)
-{
-#ifdef CONFIG_SMP
-	per_cpu_ptr(dqstats_pcpu, smp_processor_id())->stat[type]++;
-#else
-	dqstats.stat[type]++;
-#endif
-}
-
-static inline void dqstats_dec(unsigned int type)
-{
-#ifdef CONFIG_SMP
-	per_cpu_ptr(dqstats_pcpu, smp_processor_id())->stat[type]--;
-#else
-	dqstats.stat[type]--;
-#endif
-}
+#define dqstats_inc(type)	percpu_counter_inc(&dqstats.cntp[(type)])
+#define dqstats_dec(type)	percpu_counter_dec(&dqstats.cntp[(type)])
 
 #define DQ_MOD_B	0	/* dquot modified since read */
 #define DQ_BLKS_B	1	/* uid/gid has been warned about blk limit */
-- 
1.6.6.1
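
P.S. For anyone less familiar with the percpu_counter API the patch switches to, here is a minimal stand-alone sketch of the pattern it relies on. This is not part of the patch; the names (example_counter, example_init) are made up, and the two-argument percpu_counter_init() matches the API of this kernel generation.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/percpu_counter.h>

/* Hypothetical counter, standing in for one slot of dqstats.cntp[]. */
static struct percpu_counter example_counter;

static int __init example_init(void)
{
	int ret;

	/* Allocates per-cpu storage, so it can fail and must be checked. */
	ret = percpu_counter_init(&example_counter, 0);
	if (ret)
		return ret;

	/* Cheap per-cpu updates on the hot path, no global lock taken. */
	percpu_counter_inc(&example_counter);
	percpu_counter_dec(&example_counter);

	/*
	 * Slow but accurate read: folds all per-cpu deltas into the global
	 * count and clamps negative results to 0, which is what replaces
	 * the old hand-rolled dqstats_read() loop.
	 */
	pr_info("value: %lld\n", percpu_counter_sum_positive(&example_counter));

	percpu_counter_destroy(&example_counter);
	return 0;
}

percpu_counter_read() would be cheaper than the _sum_positive variant, but it can return stale or negative values, which is why the sysctl handler and the shrinker above do the full sum.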