Add percpu_ref_atomic_count(), which returns the number of references
held by a percpu_ref. It only works on a ref that has previously been
switched into atomic mode, so the caller is responsible for making sure
the ref is in the right mode.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
 include/linux/percpu-refcount.h |  1 +
 lib/percpu-refcount.c           | 26 ++++++++++++++++++++++++++
 2 files changed, 27 insertions(+)

diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 16c35a728b4c..0ff40e79efa2 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -131,6 +131,7 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 void percpu_ref_resurrect(struct percpu_ref *ref);
 void percpu_ref_reinit(struct percpu_ref *ref);
 bool percpu_ref_is_zero(struct percpu_ref *ref);
+unsigned long percpu_ref_atomic_count(struct percpu_ref *ref);
 
 /**
  * percpu_ref_kill - drop the initial ref
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index a1071cdefb5a..56286995e2b8 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -425,6 +425,32 @@ bool percpu_ref_is_zero(struct percpu_ref *ref)
 }
 EXPORT_SYMBOL_GPL(percpu_ref_is_zero);
 
+/**
+ * percpu_ref_atomic_count - returns the number of remaining references
+ * @ref: percpu_ref to read
+ *
+ * This function is safe to call as long as @ref has been switched into
+ * atomic mode, and is between init and exit.
+ */
+unsigned long percpu_ref_atomic_count(struct percpu_ref *ref)
+{
+	unsigned long __percpu *percpu_count;
+	unsigned long count, flags;
+
+	if (WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count)))
+		return -1UL;
+
+	/* protect us from being destroyed */
+	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+	if (ref->data)
+		count = atomic_long_read(&ref->data->count);
+	else
+		count = ref->percpu_count_ptr >> __PERCPU_REF_FLAG_BITS;
+	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
+
+	return count;
+}
+
 /**
  * percpu_ref_reinit - re-initialize a percpu refcount
  * @ref: perpcu_ref to re-initialize
--
2.24.0
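
Not part of the patch: a minimal usage sketch of the new helper, in case it
helps review. report_pending_refs() is a hypothetical caller;
percpu_ref_switch_to_atomic_sync() is the existing API used here to satisfy
the atomic-mode precondition documented above.

	/*
	 * Hypothetical caller, for illustration only: force @ref into
	 * atomic mode, then read how many references are still held.
	 * Assumes @ref is between init and exit.
	 */
	static unsigned long report_pending_refs(struct percpu_ref *ref)
	{
		/* collapse the per-CPU counters into one atomic_long_t */
		percpu_ref_switch_to_atomic_sync(ref);

		/* legal now that the ref is known to be in atomic mode */
		return percpu_ref_atomic_count(ref);
	}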