Prepare for supporting freezing of bio-based request queues.

Cc: Tejun Heo <tj@xxxxxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Bart Van Assche <bvanassche@xxxxxxx>
Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
---
 include/linux/percpu-refcount.h | 30 ++++++++++++++++++++++++++----
 1 file changed, 26 insertions(+), 4 deletions(-)

diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 16c35a728b4c..9061c7e3113d 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -267,8 +267,9 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 }
 
 /**
- * percpu_ref_tryget_live - try to increment a live percpu refcount
+ * percpu_ref_tryget_many_live - try to increment a live percpu refcount
  * @ref: percpu_ref to try-get
+ * @nr: number of references to get
  *
  * Increment a percpu refcount unless it has already been killed. Returns
  * %true on success; %false on failure.
@@ -281,7 +282,8 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
  *
  * This function is safe to call as long as @ref is between init and exit.
  */
-static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
+static inline bool percpu_ref_tryget_many_live(struct percpu_ref *ref,
+					       unsigned long nr)
 {
 	unsigned long __percpu *percpu_count;
 	bool ret = false;
@@ -289,10 +291,10 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 	rcu_read_lock();
 
 	if (__ref_is_percpu(ref, &percpu_count)) {
-		this_cpu_inc(*percpu_count);
+		this_cpu_add(*percpu_count, nr);
 		ret = true;
 	} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
-		ret = atomic_long_inc_not_zero(&ref->data->count);
+		ret = atomic_long_add_unless(&ref->data->count, nr, 0);
 	}
 
 	rcu_read_unlock();
@@ -300,6 +302,26 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 	return ret;
 }
 
+/**
+ * percpu_ref_tryget_live - try to increment a live percpu refcount
+ * @ref: percpu_ref to try-get
+ *
+ * Increment a percpu refcount unless it has already been killed. Returns
+ * %true on success; %false on failure.
+ *
+ * Completion of percpu_ref_kill() in itself doesn't guarantee that this
+ * function will fail. For such guarantee, percpu_ref_kill_and_confirm()
+ * should be used. After the confirm_kill callback is invoked, it's
+ * guaranteed that no new reference will be given out by
+ * percpu_ref_tryget_live().
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
+{
+	return percpu_ref_tryget_many_live(ref, 1);
+}
+
 /**
  * percpu_ref_put_many - decrement a percpu refcount
  * @ref: percpu_ref to put
-- 
2.29.2
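
For reference, here is a minimal usage sketch (not part of the patch) of how a caller might take several live references in one go with the new helper; struct my_dev and submit_batch() are made-up names used only for illustration:

#include <linux/errno.h>
#include <linux/percpu-refcount.h>

/* Hypothetical device with a percpu_ref guarding in-flight work. */
struct my_dev {
	struct percpu_ref inflight;
};

/* Take one live reference per request in the batch, or fail as a whole. */
static int submit_batch(struct my_dev *dev, unsigned long nr_reqs)
{
	if (!percpu_ref_tryget_many_live(&dev->inflight, nr_reqs))
		return -ENODEV;	/* ref already killed, e.g. queue frozen */

	/*
	 * ... queue up nr_reqs requests; each completion later drops its
	 * reference with percpu_ref_put(&dev->inflight).
	 */
	return 0;
}

Compared with calling percpu_ref_tryget_live() nr_reqs times, this keeps the fast path at a single this_cpu_add() while the ref is still in percpu mode.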