Introduce a function that allows reading the value of a per-cpu counter.
This function will be used in the next patch to check whether a per-cpu
counter in atomic mode has the value one.

Signed-off-by: Bart Van Assche <bart.vanassche@xxxxxxx>
Cc: Tejun Heo <tj@xxxxxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Jianchao Wang <jianchao.w.wang@xxxxxxxxxx>
Cc: Ming Lei <ming.lei@xxxxxxxxxx>
Cc: Alan Stern <stern@xxxxxxxxxxxxxxxxxxx>
Cc: Johannes Thumshirn <jthumshirn@xxxxxxx>
---
 include/linux/percpu-refcount.h |  2 ++
 lib/percpu-refcount.c           | 29 +++++++++++++++++++++++++++++
 2 files changed, 31 insertions(+)

diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 009cdf3d65b6..5707289ba828 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -331,4 +331,6 @@ static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
 	return !atomic_long_read(&ref->count);
 }
 
+unsigned long percpu_ref_read(struct percpu_ref *ref);
+
 #endif
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 9f96fa7bc000..c0b9fc8efa6b 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -369,3 +369,32 @@ void percpu_ref_reinit(struct percpu_ref *ref)
 	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
 }
 EXPORT_SYMBOL_GPL(percpu_ref_reinit);
+
+/**
+ * percpu_ref_read - read a percpu refcount
+ * @ref: percpu_ref to test
+ *
+ * This function is safe to call as long as @ref is between init and exit. It
+ * is the responsibility of the caller to handle changes of @ref concurrently
+ * with this function. If this function is called while @ref is in per-cpu
+ * mode the returned value may be incorrect if e.g. percpu_ref_get() is called
+ * from one CPU and percpu_ref_put() is called from another CPU.
+ */
+unsigned long percpu_ref_read(struct percpu_ref *ref)
+{
+	unsigned long __percpu *percpu_count;
+	unsigned long sum = 0;
+	int cpu;
+
+	rcu_read_lock_sched();
+	if (__ref_is_percpu(ref, &percpu_count)) {
+		for_each_possible_cpu(cpu)
+			sum += *per_cpu_ptr(percpu_count, cpu);
+	}
+	rcu_read_unlock_sched();
+	sum += atomic_long_read(&ref->count);
+	sum &= ~PERCPU_COUNT_BIAS;
+
+	return sum;
+}
+EXPORT_SYMBOL_GPL(percpu_ref_read);
-- 
2.18.0
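
For reference (not part of this patch): a minimal sketch of how a caller could
use percpu_ref_read() to test for a single remaining reference once @ref has
been switched to atomic mode, e.g. via percpu_ref_switch_to_atomic_sync(). The
helper name below is made up for illustration only.

#include <linux/percpu-refcount.h>

/*
 * Hypothetical helper, not part of this patch: returns true if @ref holds
 * exactly one reference. Only meaningful after the switch to atomic mode
 * has completed (e.g. via percpu_ref_switch_to_atomic_sync()); while @ref
 * is still in per-cpu mode the value returned by percpu_ref_read() may be
 * inaccurate, as documented in its kernel-doc comment.
 */
static bool percpu_ref_is_single(struct percpu_ref *ref)
{
	return percpu_ref_read(ref) == 1;
}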