Commit-ID:  29dee3c03abce04cd527878ef5f9e5f91b7b83f4
Gitweb:     http://git.kernel.org/tip/29dee3c03abce04cd527878ef5f9e5f91b7b83f4
Author:     Peter Zijlstra <peterz@xxxxxxxxxxxxx>
AuthorDate: Fri, 10 Feb 2017 16:27:52 +0100
Committer:  Ingo Molnar <mingo@xxxxxxxxxx>
CommitDate: Fri, 24 Feb 2017 09:02:10 +0100

locking/refcounts: Out-of-line everything

Linus asked to please make this real C code.  And since size then isn't
an issue whatsoever anymore, remove the debug knob and make all WARN()s
unconditional.

Suggested-by: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: dwindsor@xxxxxxxxx
Cc: elena.reshetova@xxxxxxxxx
Cc: gregkh@xxxxxxxxxxxxxxxxxxx
Cc: ishkamiel@xxxxxxxxx
Cc: keescook@xxxxxxxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
---
 include/linux/refcount.h                   | 277 ++---------------------------
 lib/Kconfig.debug                          |  13 --
 lib/Makefile                               |   2 +-
 include/linux/refcount.h => lib/refcount.c |  87 ++++-----
 4 files changed, 43 insertions(+), 336 deletions(-)

diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 600aadf..0e8cfb2 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -1,55 +1,10 @@
 #ifndef _LINUX_REFCOUNT_H
 #define _LINUX_REFCOUNT_H
 
-/*
- * Variant of atomic_t specialized for reference counts.
- *
- * The interface matches the atomic_t interface (to aid in porting) but only
- * provides the few functions one should use for reference counting.
- *
- * It differs in that the counter saturates at UINT_MAX and will not move once
- * there. This avoids wrapping the counter and causing 'spurious'
- * use-after-free issues.
- *
- * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
- * and provide only what is strictly required for refcounts.
- *
- * The increments are fully relaxed; these will not provide ordering. The
- * rationale is that whatever is used to obtain the object we're increasing the
- * reference count on will provide the ordering. For locked data structures,
- * its the lock acquire, for RCU/lockless data structures its the dependent
- * load.
- *
- * Do note that inc_not_zero() provides a control dependency which will order
- * future stores against the inc, this ensures we'll never modify the object
- * if we did not in fact acquire a reference.
- *
- * The decrements will provide release order, such that all the prior loads and
- * stores will be issued before, it also provides a control dependency, which
- * will order us against the subsequent free().
- *
- * The control dependency is against the load of the cmpxchg (ll/sc) that
- * succeeded. This means the stores aren't fully ordered, but this is fine
- * because the 1->0 transition indicates no concurrency.
- *
- * Note that the allocator is responsible for ordering things between free()
- * and alloc().
- *
- */
-
 #include <linux/atomic.h>
-#include <linux/bug.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
 
-#ifdef CONFIG_DEBUG_REFCOUNT
-#define REFCOUNT_WARN(cond, str) WARN_ON(cond)
-#define __refcount_check	__must_check
-#else
-#define REFCOUNT_WARN(cond, str) (void)(cond)
-#define __refcount_check
-#endif
-
 typedef struct refcount_struct {
 	atomic_t refs;
 } refcount_t;
 
@@ -66,229 +21,21 @@ static inline unsigned int refcount_read(const refcount_t *r)
 	return atomic_read(&r->refs);
 }
 
-static inline __refcount_check
-bool refcount_add_not_zero(unsigned int i, refcount_t *r)
-{
-	unsigned int old, new, val = atomic_read(&r->refs);
-
-	for (;;) {
-		if (!val)
-			return false;
-
-		if (unlikely(val == UINT_MAX))
-			return true;
-
-		new = val + i;
-		if (new < val)
-			new = UINT_MAX;
-		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
-		if (old == val)
-			break;
-
-		val = old;
-	}
-
-	REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
-
-	return true;
-}
-
-static inline void refcount_add(unsigned int i, refcount_t *r)
-{
-	REFCOUNT_WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
-}
-
-/*
- * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
- *
- * Provides no memory ordering, it is assumed the caller has guaranteed the
- * object memory to be stable (RCU, etc.). It does provide a control dependency
- * and thereby orders future stores. See the comment on top.
- */
-static inline __refcount_check
-bool refcount_inc_not_zero(refcount_t *r)
-{
-	unsigned int old, new, val = atomic_read(&r->refs);
-
-	for (;;) {
-		new = val + 1;
-
-		if (!val)
-			return false;
-
-		if (unlikely(!new))
-			return true;
-
-		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
-		if (old == val)
-			break;
-
-		val = old;
-	}
-
-	REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
-
-	return true;
-}
-
-/*
- * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
- *
- * Provides no memory ordering, it is assumed the caller already has a
- * reference on the object, will WARN when this is not so.
- */
-static inline void refcount_inc(refcount_t *r)
-{
-	REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
-}
-
-/*
- * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
- * decrement when saturated at UINT_MAX.
- *
- * Provides release memory ordering, such that prior loads and stores are done
- * before, and provides a control dependency such that free() must come after.
- * See the comment on top.
- */
-static inline __refcount_check
-bool refcount_sub_and_test(unsigned int i, refcount_t *r)
-{
-	unsigned int old, new, val = atomic_read(&r->refs);
-
-	for (;;) {
-		if (unlikely(val == UINT_MAX))
-			return false;
-
-		new = val - i;
-		if (new > val) {
-			REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
-			return false;
-		}
-
-		old = atomic_cmpxchg_release(&r->refs, val, new);
-		if (old == val)
-			break;
-
-		val = old;
-	}
-
-	return !new;
-}
-
-static inline __refcount_check
-bool refcount_dec_and_test(refcount_t *r)
-{
-	return refcount_sub_and_test(1, r);
-}
+extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r);
+extern void refcount_add(unsigned int i, refcount_t *r);
 
-/*
- * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
- * when saturated at UINT_MAX.
- *
- * Provides release memory ordering, such that prior loads and stores are done
- * before.
- */
-static inline
-void refcount_dec(refcount_t *r)
-{
-	REFCOUNT_WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
-}
-
-/*
- * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
- * success thereof.
- *
- * Like all decrement operations, it provides release memory order and provides
- * a control dependency.
- *
- * It can be used like a try-delete operator; this explicit case is provided
- * and not cmpxchg in generic, because that would allow implementing unsafe
- * operations.
- */
-static inline __refcount_check
-bool refcount_dec_if_one(refcount_t *r)
-{
-	return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
-}
-
-/*
- * No atomic_t counterpart, it decrements unless the value is 1, in which case
- * it will return false.
- *
- * Was often done like: atomic_add_unless(&var, -1, 1)
- */
-static inline __refcount_check
-bool refcount_dec_not_one(refcount_t *r)
-{
-	unsigned int old, new, val = atomic_read(&r->refs);
+extern __must_check bool refcount_inc_not_zero(refcount_t *r);
+extern void refcount_inc(refcount_t *r);
 
-	for (;;) {
-		if (unlikely(val == UINT_MAX))
-			return true;
+extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r);
+extern void refcount_sub(unsigned int i, refcount_t *r);
 
-		if (val == 1)
-			return false;
+extern __must_check bool refcount_dec_and_test(refcount_t *r);
+extern void refcount_dec(refcount_t *r);
 
-		new = val - 1;
-		if (new > val) {
-			REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
-			return true;
-		}
-
-		old = atomic_cmpxchg_release(&r->refs, val, new);
-		if (old == val)
-			break;
-
-		val = old;
-	}
-
-	return true;
-}
-
-/*
- * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
- * to decrement when saturated at UINT_MAX.
- *
- * Provides release memory ordering, such that prior loads and stores are done
- * before, and provides a control dependency such that free() must come after.
- * See the comment on top.
- */
-static inline __refcount_check
-bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
-{
-	if (refcount_dec_not_one(r))
-		return false;
-
-	mutex_lock(lock);
-	if (!refcount_dec_and_test(r)) {
-		mutex_unlock(lock);
-		return false;
-	}
-
-	return true;
-}
-
-/*
- * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
- * decrement when saturated at UINT_MAX.
- *
- * Provides release memory ordering, such that prior loads and stores are done
- * before, and provides a control dependency such that free() must come after.
- * See the comment on top.
- */
-static inline __refcount_check
-bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
-{
-	if (refcount_dec_not_one(r))
-		return false;
-
-	spin_lock(lock);
-	if (!refcount_dec_and_test(r)) {
-		spin_unlock(lock);
-		return false;
-	}
-
-	return true;
-}
+extern __must_check bool refcount_dec_if_one(refcount_t *r);
+extern __must_check bool refcount_dec_not_one(refcount_t *r);
+extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
+extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
 
 #endif /* _LINUX_REFCOUNT_H */
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index acedbe6..0dbce99 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -716,19 +716,6 @@ source "lib/Kconfig.kmemcheck"
 
 source "lib/Kconfig.kasan"
 
-config DEBUG_REFCOUNT
-	bool "Verbose refcount checks"
-	help
-	  Say Y here if you want reference counters (refcount_t and kref) to
-	  generate WARNs on dubious usage. Without this refcount_t will still
-	  be a saturating counter and avoid Use-After-Free by turning it into
-	  a resource leak Denial-Of-Service.
-
-	  Use of this option will increase kernel text size but will alert the
-	  admin of potential abuse.
-
-	  If in doubt, say "N".
-
 endmenu # "Memory Debugging"
 
 config ARCH_HAS_KCOV
diff --git a/lib/Makefile b/lib/Makefile
index 19ea761..192e4d0 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -36,7 +36,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
	 bsearch.o find_bit.o llist.o memweight.o kfifo.o \
	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
-	 once.o
+	 once.o refcount.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += hexdump.o
diff --git a/include/linux/refcount.h b/lib/refcount.c
similarity index 78%
copy from include/linux/refcount.h
copy to lib/refcount.c
index 600aadf..1d33366 100644
--- a/include/linux/refcount.h
+++ b/lib/refcount.c
@@ -1,6 +1,3 @@
-#ifndef _LINUX_REFCOUNT_H
-#define _LINUX_REFCOUNT_H
-
 /*
  * Variant of atomic_t specialized for reference counts.
  *
@@ -37,36 +34,9 @@
  *
  */
 
-#include <linux/atomic.h>
+#include <linux/refcount.h>
 #include <linux/bug.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
 
-#ifdef CONFIG_DEBUG_REFCOUNT
-#define REFCOUNT_WARN(cond, str) WARN_ON(cond)
-#define __refcount_check	__must_check
-#else
-#define REFCOUNT_WARN(cond, str) (void)(cond)
-#define __refcount_check
-#endif
-
-typedef struct refcount_struct {
-	atomic_t refs;
-} refcount_t;
-
-#define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }
-
-static inline void refcount_set(refcount_t *r, unsigned int n)
-{
-	atomic_set(&r->refs, n);
-}
-
-static inline unsigned int refcount_read(const refcount_t *r)
-{
-	return atomic_read(&r->refs);
-}
-
-static inline __refcount_check
 bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 {
 	unsigned int old, new, val = atomic_read(&r->refs);
@@ -88,15 +58,17 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 		val = old;
 	}
 
-	REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+	WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
 
 	return true;
 }
+EXPORT_SYMBOL_GPL(refcount_add_not_zero);
 
-static inline void refcount_add(unsigned int i, refcount_t *r)
+void refcount_add(unsigned int i, refcount_t *r)
 {
-	REFCOUNT_WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
+	WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
 }
+EXPORT_SYMBOL_GPL(refcount_add);
 
 /*
  * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
@@ -105,7 +77,6 @@ static inline void refcount_add(unsigned int i, refcount_t *r)
  * object memory to be stable (RCU, etc.). It does provide a control dependency
  * and thereby orders future stores. See the comment on top.
  */
-static inline __refcount_check
 bool refcount_inc_not_zero(refcount_t *r)
 {
 	unsigned int old, new, val = atomic_read(&r->refs);
@@ -126,10 +97,11 @@ bool refcount_inc_not_zero(refcount_t *r)
 		val = old;
 	}
 
-	REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
+	WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
 
 	return true;
 }
+EXPORT_SYMBOL_GPL(refcount_inc_not_zero);
 
 /*
  * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
@@ -137,20 +109,12 @@ bool refcount_inc_not_zero(refcount_t *r)
  * Provides no memory ordering, it is assumed the caller already has a
  * reference on the object, will WARN when this is not so.
  */
-static inline void refcount_inc(refcount_t *r)
+void refcount_inc(refcount_t *r)
 {
-	REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
+	WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
 }
+EXPORT_SYMBOL_GPL(refcount_inc);
 
-/*
- * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
- * decrement when saturated at UINT_MAX.
- *
- * Provides release memory ordering, such that prior loads and stores are done
- * before, and provides a control dependency such that free() must come after.
- * See the comment on top.
- */
-static inline __refcount_check
 bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 {
 	unsigned int old, new, val = atomic_read(&r->refs);
@@ -161,7 +125,7 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 
 		new = val - i;
 		if (new > val) {
-			REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+			WARN(new > val, "refcount_t: underflow; use-after-free.\n");
 			return false;
 		}
 
@@ -174,12 +138,21 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 
 	return !new;
 }
+EXPORT_SYMBOL_GPL(refcount_sub_and_test);
 
-static inline __refcount_check
+/*
+ * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
+ * decrement when saturated at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ */
 bool refcount_dec_and_test(refcount_t *r)
 {
 	return refcount_sub_and_test(1, r);
 }
+EXPORT_SYMBOL_GPL(refcount_dec_and_test);
 
 /*
  * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
@@ -188,11 +161,12 @@ bool refcount_dec_and_test(refcount_t *r)
  * Provides release memory ordering, such that prior loads and stores are done
  * before.
  */
-static inline
+
 void refcount_dec(refcount_t *r)
 {
-	REFCOUNT_WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
+	WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
 }
+EXPORT_SYMBOL_GPL(refcount_dec);
 
 /*
  * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
@@ -205,11 +179,11 @@ void refcount_dec(refcount_t *r)
  * and not cmpxchg in generic, because that would allow implementing unsafe
  * operations.
  */
-static inline __refcount_check
 bool refcount_dec_if_one(refcount_t *r)
 {
 	return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
 }
+EXPORT_SYMBOL_GPL(refcount_dec_if_one);
 
 /*
  * No atomic_t counterpart, it decrements unless the value is 1, in which case
@@ -217,7 +191,6 @@ bool refcount_dec_if_one(refcount_t *r)
  *
  * Was often done like: atomic_add_unless(&var, -1, 1)
  */
-static inline __refcount_check
 bool refcount_dec_not_one(refcount_t *r)
 {
 	unsigned int old, new, val = atomic_read(&r->refs);
@@ -231,7 +204,7 @@ bool refcount_dec_not_one(refcount_t *r)
 
 		new = val - 1;
 		if (new > val) {
-			REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
+			WARN(new > val, "refcount_t: underflow; use-after-free.\n");
 			return true;
 		}
 
@@ -244,6 +217,7 @@ bool refcount_dec_not_one(refcount_t *r)
 
 	return true;
 }
+EXPORT_SYMBOL_GPL(refcount_dec_not_one);
 
 /*
  * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
@@ -253,7 +227,6 @@ bool refcount_dec_not_one(refcount_t *r)
  * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
  */
-static inline __refcount_check
 bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
 {
 	if (refcount_dec_not_one(r))
@@ -267,6 +240,7 @@ bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
 
 	return true;
 }
+EXPORT_SYMBOL_GPL(refcount_dec_and_mutex_lock);
 
 /*
  * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
@@ -276,7 +250,6 @@ bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
  * before, and provides a control dependency such that free() must come after.
  * See the comment on top.
  */
-static inline __refcount_check
 bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
 {
 	if (refcount_dec_not_one(r))
 		return false;
@@ -290,5 +263,5 @@ bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
 
 	return true;
 }
+EXPORT_SYMBOL_GPL(refcount_dec_and_lock);
 
-#endif /* _LINUX_REFCOUNT_H */
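
For readers new to the API: the patch only moves the implementation out of
line, so callers keep using refcount_t the same way. Below is a minimal
usage sketch of the get/put pattern against the declarations above; the
'struct foo' object and its helpers are hypothetical, not part of the patch.

/* Hypothetical example: the usual refcount_t get/put pattern. */
#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
	refcount_t ref;
	/* ... object payload ... */
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		refcount_set(&f->ref, 1);	/* caller owns the initial reference */
	return f;
}

static struct foo *foo_get(struct foo *f)
{
	refcount_inc(&f->ref);	/* now WARNs unconditionally on increment-from-0 */
	return f;
}

static void foo_put(struct foo *f)
{
	/* release ordering: prior loads/stores complete before the free */
	if (refcount_dec_and_test(&f->ref))
		kfree(f);
}

Since the functions are now out of line, each of these calls is a real call
into lib/refcount.o: the WARN() text and saturation logic exist once in the
kernel image instead of at every inline call site, which is what lets the
debug knob go away and the checks become unconditional.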