The percpu parts are added to the atomic part in percpu_ref_switch_to_atomic_rcu(), so it is more readable and reasonable to clear them there. Cc: Tejun Heo <tj@xxxxxxxxxx> Cc: Jianchao Wang <jianchao.w.wang@xxxxxxxxxx> Cc: Kent Overstreet <kent.overstreet@xxxxxxxxx> Cc: linux-block@xxxxxxxxxxxxxxx Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx> --- lib/percpu-refcount.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c index 9f96fa7bc000..a220b717f6bb 100644 --- a/lib/percpu-refcount.c +++ b/lib/percpu-refcount.c @@ -130,8 +130,10 @@ static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu) unsigned long count = 0; int cpu; - for_each_possible_cpu(cpu) + for_each_possible_cpu(cpu) { count += *per_cpu_ptr(percpu_count, cpu); + *per_cpu_ptr(percpu_count, cpu) = 0; + } pr_debug("global %ld percpu %ld", atomic_long_read(&ref->count), (long)count); @@ -187,7 +189,6 @@ static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref, static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref) { unsigned long __percpu *percpu_count = percpu_count_ptr(ref); - int cpu; BUG_ON(!percpu_count); @@ -202,9 +203,6 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref) * zeroing is visible to all percpu accesses which can see the * following __PERCPU_REF_ATOMIC clearing. */ - for_each_possible_cpu(cpu) - *per_cpu_ptr(percpu_count, cpu) = 0; - smp_store_release(&ref->percpu_count_ptr, ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC); } -- 2.9.5