We only have two users in which percpu_ref_exit() is called from ->release(). Add a PERCPU_REF_AUTO_EXIT flag to avoid calling percpu_ref_exit() from ->release() directly, since percpu_ref_exit() needs to drain ->release() to fix a use-after-free. Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx> --- include/linux/percpu-refcount.h | 21 +++++++++++++++++++-- lib/percpu-refcount.c | 9 ++++++--- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index d73a1c08c3e3..006c6aae261e 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -90,6 +90,11 @@ enum { * Allow switching from atomic mode to percpu mode. */ PERCPU_REF_ALLOW_REINIT = 1 << 2, + + /* + * call percpu_ref_exit() when releasing + */ + PERCPU_REF_AUTO_EXIT = 1 << 3, }; struct percpu_ref_data { @@ -98,6 +103,7 @@ struct percpu_ref_data { percpu_ref_func_t *confirm_switch; bool force_atomic:1; bool allow_reinit:1; + bool auto_exit:1; struct rcu_head rcu; struct percpu_ref *ref; }; @@ -331,8 +337,19 @@ static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr) if (__ref_is_percpu(ref, &percpu_count)) this_cpu_sub(*percpu_count, nr); - else if (unlikely(atomic_long_sub_and_test(nr, &ref->data->count))) - ref->data->release(ref); + else { + struct percpu_ref_data *data = ref->data; + struct percpu_ref copy = *ref; + bool release = false; + + if (unlikely(atomic_long_sub_and_test(nr, &data->count))) { + data->release(ref); + release = true; + } + + if (release && data->auto_exit) + percpu_ref_exit(&copy); + } rcu_read_unlock(); } diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c index 668f6aa6a75d..c0cadf92948f 100644 --- a/lib/percpu-refcount.c +++ b/lib/percpu-refcount.c @@ -82,6 +82,7 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release, data->force_atomic = flags & PERCPU_REF_INIT_ATOMIC; data->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT; + 
data->auto_exit = flags & PERCPU_REF_AUTO_EXIT; if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) { ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC; @@ -123,9 +124,11 @@ static void __percpu_ref_exit(struct percpu_ref *ref) * * This function exits @ref. The caller is responsible for ensuring that * @ref is no longer in active use. The usual places to invoke this - * function from are the @ref->release() callback or in init failure path - * where percpu_ref_init() succeeded but other parts of the initialization - * of the embedding object failed. + * function from are where the refcounter is confirmed as idle or in init + * failure path where percpu_ref_init() succeeded but other parts of the + * initialization of the embedding object failed. For callers which need + * to call percpu_ref_exit() in ->release, please pass PERCPU_REF_AUTO_EXIT + * to percpu_ref_init(). */ void percpu_ref_exit(struct percpu_ref *ref) { -- 2.38.1