Similar to the existing functions that take a mutex or spinlock if and
only if a reference count is decremented to zero, these new functions
take an rwsem for writing just before the refcount reaches 0 (and call
a user-provided release function in the case of kref_put_rwsem).

These will be used for statsfs_source data structures, which are
protected by an rw_semaphore to allow concurrent sysfs reads.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@xxxxxxxxxx>
---
 include/linux/kref.h     | 11 +++++++++++
 include/linux/refcount.h |  2 ++
 lib/refcount.c           | 32 ++++++++++++++++++++++++++++++++
 3 files changed, 45 insertions(+)

diff --git a/include/linux/kref.h b/include/linux/kref.h
index d32e21a2538c..2dc935445f45 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -79,6 +79,17 @@ static inline int kref_put_mutex(struct kref *kref,
 	return 0;
 }
 
+static inline int kref_put_rwsem(struct kref *kref,
+				 void (*release)(struct kref *kref),
+				 struct rw_semaphore *rwsem)
+{
+	if (refcount_dec_and_down_write(&kref->refcount, rwsem)) {
+		release(kref);
+		return 1;
+	}
+	return 0;
+}
+
 static inline int kref_put_lock(struct kref *kref,
 				void (*release)(struct kref *kref),
 				spinlock_t *lock)
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 0e3ee25eb156..a9d5038aec9a 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -99,6 +99,7 @@
 #include <linux/spinlock_types.h>
 
 struct mutex;
+struct rw_semaphore;
 
 /**
  * struct refcount_t - variant of atomic_t specialized for reference counts
@@ -313,6 +314,7 @@ static inline void refcount_dec(refcount_t *r)
 extern __must_check bool refcount_dec_if_one(refcount_t *r);
 extern __must_check bool refcount_dec_not_one(refcount_t *r);
 extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
+extern __must_check bool refcount_dec_and_down_write(refcount_t *r, struct rw_semaphore *rwsem);
 extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
 extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
 						       spinlock_t *lock,
diff --git a/lib/refcount.c b/lib/refcount.c
index ebac8b7d15a7..03e113e1b43a 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -4,6 +4,7 @@
  */
 
 #include <linux/mutex.h>
+#include <linux/rwsem.h>
 #include <linux/refcount.h>
 #include <linux/spinlock.h>
 #include <linux/bug.h>
@@ -94,6 +95,37 @@ bool refcount_dec_not_one(refcount_t *r)
 }
 EXPORT_SYMBOL(refcount_dec_not_one);
 
+/**
+ * refcount_dec_and_down_write - return holding rwsem for writing if able to decrement
+ * refcount to 0
+ * @r: the refcount
+ * @lock: the rwsem to be taken for writing
+ *
+ * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
+ * to decrement when saturated at REFCOUNT_SATURATED.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ *
+ * Return: true and hold rwsem for writing if able to decrement refcount to 0, false
+ * otherwise
+ */
+bool refcount_dec_and_down_write(refcount_t *r, struct rw_semaphore *lock)
+{
+	if (refcount_dec_not_one(r))
+		return false;
+
+	down_write(lock);
+	if (!refcount_dec_and_test(r)) {
+		up_write(lock);
+		return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL(refcount_dec_and_down_write);
+
 /**
  * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
  * refcount to 0
-- 
2.25.2
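
For context, a minimal usage sketch for kref_put_rwsem() (not part of
the patch; struct foo, foo_release() and foo_put() are hypothetical
names): an object on a list that readers walk under down_read(), where
the final put must unlink and free the object with the rwsem held for
writing. As with kref_put_mutex(), the new helper returns 1 with the
lock still held on the zero path, so the caller drops it:

	#include <linux/kernel.h>
	#include <linux/kref.h>
	#include <linux/list.h>
	#include <linux/rwsem.h>
	#include <linux/slab.h>

	struct foo {
		struct kref refcount;
		struct list_head node;
	};

	/* Protects the list that readers walk under down_read(). */
	static DECLARE_RWSEM(foo_rwsem);

	static void foo_release(struct kref *kref)
	{
		struct foo *f = container_of(kref, struct foo, refcount);

		/* Called with foo_rwsem held for writing, so no reader
		 * can observe the object while it is unlinked and freed. */
		list_del(&f->node);
		kfree(f);
	}

	static void foo_put(struct foo *f)
	{
		/* The rwsem is taken for writing only when the count
		 * drops to 0 and is still held on return; drop it here. */
		if (kref_put_rwsem(&f->refcount, foo_release, &foo_rwsem))
			up_write(&foo_rwsem);
	}

This mirrors the existing kref_put_mutex() calling convention; a
release callback could equally do the up_write() itself, as long as
exactly one side drops the lock.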