Re: [RFC PATCH -v2] percpu_counters: make fbc->count read atomic on 32 bit architecture

On Mon, 2008-08-25 at 16:50 +0530, Aneesh Kumar K.V wrote:
> fbc->count is of type s64. The change was introduced by commit
> 0216bfcffe424a5473daa4da47440881b36c1f4, which changed the type
> from long to s64. On 32-bit architectures a 64-bit load is done as
> two 32-bit loads, so a reader can observe a torn (wrong) value of
> fbc->count. Since fbc->count is read frequently and updated rarely,
> use a seqlock; this keeps the locking impact on the read path low
> on 32-bit architectures.
> 
> percpu_counter_read() is also used from interrupt context, so use
> the IRQ-safe version of the seqlock read primitives.
> 
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@xxxxxxxxxxxxxxxxxx>
> CC: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>
> CC: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
> ---
>  include/linux/percpu_counter.h |   29 +++++++++++++++++++++++++----
>  lib/percpu_counter.c           |   20 ++++++++++----------
>  2 files changed, 35 insertions(+), 14 deletions(-)
> 
> diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
> index 9007ccd..36f3d2d 100644
> --- a/include/linux/percpu_counter.h
> +++ b/include/linux/percpu_counter.h
> @@ -6,7 +6,7 @@
>   * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
>   */
>  
> -#include <linux/spinlock.h>
> +#include <linux/seqlock.h>
>  #include <linux/smp.h>
>  #include <linux/list.h>
>  #include <linux/threads.h>
> @@ -16,7 +16,7 @@
>  #ifdef CONFIG_SMP
>  
>  struct percpu_counter {
> -	spinlock_t lock;
> +	seqlock_t lock;
>  	s64 count;
>  #ifdef CONFIG_HOTPLUG_CPU
>  	struct list_head list;	/* All percpu_counters are on a list */
> @@ -53,10 +53,31 @@ static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
>  	return __percpu_counter_sum(fbc);
>  }
>  
> -static inline s64 percpu_counter_read(struct percpu_counter *fbc)
> +#if BITS_PER_LONG == 64
> +static inline s64 fbc_count(struct percpu_counter *fbc)
>  {
>  	return fbc->count;
>  }
> +#else
> +/* no atomic 64-bit load on 32-bit architectures */
> +static inline s64 fbc_count(struct percpu_counter *fbc)
> +{
> +	s64 ret;
> +	unsigned seq;
> +	unsigned long flags;
> +	do {
> +		seq = read_seqbegin_irqsave(&fbc->lock, flags);
> +		ret = fbc->count;
> +	} while (read_seqretry_irqrestore(&fbc->lock, seq, flags));

Do we really need to disable IRQs here? It seems to me the worst that
can happen is that an IRQ will change ->count and bump the sequence
number - a case the retry logic already handles perfectly.

And skipping the IRQ flags save/restore saves a lot of time on some
architectures.
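
Something like the following untested sketch is what I mean - the same
retry loop, but with the plain read_seqbegin()/read_seqretry() pair and
no flags save/restore:

	static inline s64 fbc_count(struct percpu_counter *fbc)
	{
		s64 ret;
		unsigned seq;

		do {
			/* if a writer (even one in IRQ context) touched
			 * ->count meanwhile, the sequence changes and we
			 * simply go around again */
			seq = read_seqbegin(&fbc->lock);
			ret = fbc->count;
		} while (read_seqretry(&fbc->lock, seq));

		return ret;
	}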

> +	return ret;
> +
> +}
> +#endif
> +
> +static inline s64 percpu_counter_read(struct percpu_counter *fbc)
> +{
> +	return fbc_count(fbc);
> +}
>  
>  /*
>   * It is possible for the percpu_counter_read() to return a small negative
> @@ -65,7 +86,7 @@ static inline s64 percpu_counter_read(struct percpu_counter *fbc)
>   */
>  static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
>  {
> -	s64 ret = fbc->count;
> +	s64 ret = fbc_count(fbc);
>  
>  	barrier();		/* Prevent reloads of fbc->count */
>  	if (ret >= 0)
> diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
> index a866389..83bb809 100644
> --- a/lib/percpu_counter.c
> +++ b/lib/percpu_counter.c
> @@ -18,13 +18,13 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
>  {
>  	int cpu;
>  
> -	spin_lock(&fbc->lock);
> +	write_seqlock(&fbc->lock);
>  	for_each_possible_cpu(cpu) {
>  		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
>  		*pcount = 0;
>  	}
>  	fbc->count = amount;
> -	spin_unlock(&fbc->lock);
> +	write_sequnlock(&fbc->lock);
>  }
>  EXPORT_SYMBOL(percpu_counter_set);
>  
> @@ -37,10 +37,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
>  	pcount = per_cpu_ptr(fbc->counters, cpu);
>  	count = *pcount + amount;
>  	if (count >= batch || count <= -batch) {
> -		spin_lock(&fbc->lock);
> +		write_seqlock(&fbc->lock);
>  		fbc->count += count;
>  		*pcount = 0;
> -		spin_unlock(&fbc->lock);
> +		write_sequnlock(&fbc->lock);
>  	} else {
>  		*pcount = count;
>  	}
> @@ -57,7 +57,7 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
>  	s64 ret;
>  	int cpu;
>  
> -	spin_lock(&fbc->lock);
> +	write_seqlock(&fbc->lock);
>  	ret = fbc->count;
>  	for_each_online_cpu(cpu) {
>  		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
> @@ -66,7 +66,7 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
>  	}
>  	fbc->count = ret;
>  
> -	spin_unlock(&fbc->lock);
> +	write_sequnlock(&fbc->lock);
>  	return ret;
>  }
>  EXPORT_SYMBOL(__percpu_counter_sum);
> @@ -75,7 +75,7 @@ EXPORT_SYMBOL(__percpu_counter_sum);
>  
>  int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
>  {
> -	spin_lock_init(&fbc->lock);
> +	seqlock_init(&fbc->lock);
>  	fbc->count = amount;
>  	fbc->counters = alloc_percpu(s32);
>  	if (!fbc->counters)
> @@ -95,7 +95,7 @@ int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount)
>  
>  	err = percpu_counter_init(fbc, amount);
>  	if (!err)
> -		lockdep_set_class(&fbc->lock, &percpu_counter_irqsafe);
> +		lockdep_set_class(&fbc->lock.lock, &percpu_counter_irqsafe);
>  	return err;
>  }
>  
> @@ -130,11 +130,11 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
>  		s32 *pcount;
>  		unsigned long flags;
>  
> -		spin_lock_irqsave(&fbc->lock, flags);
> +		write_seqlock_irqsave(&fbc->lock, flags);
>  		pcount = per_cpu_ptr(fbc->counters, cpu);
>  		fbc->count += *pcount;
>  		*pcount = 0;
> -		spin_unlock_irqrestore(&fbc->lock, flags);
> +		write_sequnlock_irqrestore(&fbc->lock, flags);
>  	}
>  	mutex_unlock(&percpu_counters_lock);
>  	return NOTIFY_OK;

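As an aside, for anyone following the conversion: the seqlock write side
bumps the sequence counter before and after the update (so it is odd
while an update is in flight), and the read side retries until it sees
the same even value on both sides of the read. Below is a rough
user-space model of that protocol - untested, single writer assumed,
C11 atomics standing in for the kernel primitives, and the plain 64-bit
access to 'count' left formally racy for the sake of brevity:

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	struct counter {
		atomic_uint seq;	/* odd while a writer is mid-update */
		int64_t count;		/* the value readers could see torn */
	};

	/* Writer side: make seq odd for the duration of the update.
	 * (The kernel serializes writers via the spinlock embedded in
	 * seqlock_t; this model simply assumes a single writer.) */
	static void counter_add(struct counter *c, int64_t delta)
	{
		atomic_fetch_add(&c->seq, 1);	/* seq becomes odd */
		c->count += delta;		/* non-atomic 64-bit update */
		atomic_fetch_add(&c->seq, 1);	/* seq even again */
	}

	/* Reader side: retry until an even, unchanged seq brackets the read. */
	static int64_t counter_read(struct counter *c)
	{
		unsigned s1, s2;
		int64_t v;

		do {
			s1 = atomic_load(&c->seq);
			v = c->count;		/* may be torn mid-update... */
			s2 = atomic_load(&c->seq);
		} while ((s1 & 1) || s1 != s2);	/* ...so go around again */

		return v;
	}

	int main(void)
	{
		struct counter c = { .seq = 0, .count = 0 };

		counter_add(&c, 1LL << 32);	/* touches both 32-bit halves */
		printf("%lld\n", (long long)counter_read(&c));
		return 0;
	}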