Enable capability analysis for rhashtable, which served as an initial
test case because it combines RCU, mutex, and bit_spinlock usage.

Users of rhashtable also benefit from annotations on the API, which
will now warn if the RCU read lock is not held where required.

Signed-off-by: Marco Elver <elver@xxxxxxxxxx>
---
(A sketch of the kind of caller these annotations flag is appended
below the patch.)

 include/linux/rhashtable.h | 14 +++++++++++---
 lib/Makefile               |  2 ++
 lib/rhashtable.c           | 12 +++++++++---
 3 files changed, 22 insertions(+), 6 deletions(-)

diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 8463a128e2f4..c6374691ccc7 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -245,16 +245,17 @@ void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
 void rhashtable_walk_enter(struct rhashtable *ht,
 			   struct rhashtable_iter *iter);
 void rhashtable_walk_exit(struct rhashtable_iter *iter);
-int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);
+int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires_shared(RCU);
 
 static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
+	__acquires_shared(RCU)
 {
 	(void)rhashtable_walk_start_check(iter);
 }
 
 void *rhashtable_walk_next(struct rhashtable_iter *iter);
 void *rhashtable_walk_peek(struct rhashtable_iter *iter);
-void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
+void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases_shared(RCU);
 
 void rhashtable_free_and_destroy(struct rhashtable *ht,
 				 void (*free_fn)(void *ptr, void *arg),
@@ -325,6 +326,7 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
 
 static inline unsigned long rht_lock(struct bucket_table *tbl,
 				     struct rhash_lock_head __rcu **bkt)
+	__acquires(__bitlock(0, bkt))
 {
 	unsigned long flags;
 
@@ -337,6 +339,7 @@ static inline unsigned long rht_lock(struct bucket_table *tbl,
 static inline unsigned long rht_lock_nested(struct bucket_table *tbl,
 					    struct rhash_lock_head __rcu **bucket,
 					    unsigned int subclass)
+	__acquires(__bitlock(0, bucket))
 {
 	unsigned long flags;
 
@@ -349,6 +352,7 @@ static inline unsigned long rht_lock_nested(struct bucket_table *tbl,
 static inline void rht_unlock(struct bucket_table *tbl,
 			      struct rhash_lock_head __rcu **bkt,
 			      unsigned long flags)
+	__releases(__bitlock(0, bkt))
 {
 	lock_map_release(&tbl->dep_map);
 	bit_spin_unlock(0, (unsigned long *)bkt);
@@ -402,13 +406,14 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
 				     struct rhash_lock_head __rcu **bkt,
 				     struct rhash_head *obj,
 				     unsigned long flags)
+	__releases(__bitlock(0, bkt))
 {
 	if (rht_is_a_nulls(obj))
 		obj = NULL;
 	lock_map_release(&tbl->dep_map);
 	rcu_assign_pointer(*bkt, (void *)obj);
 	preempt_enable();
-	__release(bitlock);
+	__release(__bitlock(0, bkt));
 	local_irq_restore(flags);
 }
 
@@ -589,6 +594,7 @@ static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
 static inline struct rhash_head *__rhashtable_lookup(
 	struct rhashtable *ht, const void *key,
 	const struct rhashtable_params params)
+	__must_hold_shared(RCU)
 {
 	struct rhashtable_compare_arg arg = {
 		.ht = ht,
@@ -642,6 +648,7 @@ static inline struct rhash_head *__rhashtable_lookup(
 static inline void *rhashtable_lookup(
 	struct rhashtable *ht, const void *key,
 	const struct rhashtable_params params)
+	__must_hold_shared(RCU)
 {
 	struct rhash_head *he = __rhashtable_lookup(ht, key, params);
 
@@ -692,6 +699,7 @@ static inline void *rhashtable_lookup_fast(
 static inline struct rhlist_head *rhltable_lookup(
 	struct rhltable *hlt, const void *key,
 	const struct rhashtable_params params)
+	__must_hold_shared(RCU)
 {
 	struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);
 
diff --git a/lib/Makefile b/lib/Makefile
index f40ba93c9a94..c7004270ad5f 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -45,6 +45,8 @@ lib-$(CONFIG_MIN_HEAP) += min_heap.o
 lib-y += kobject.o klist.o
 obj-y += lockref.o
 
+CAPABILITY_ANALYSIS_rhashtable.o := y
+
 obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \
 	 bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
 	 list_sort.o uuid.o iov_iter.o clz_ctz.o \
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 3e555d012ed6..47a61e214621 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -11,6 +11,10 @@
  * pointer as suggested by Josh Triplett
  */
 
+#include <linux/rhashtable.h>
+
+disable_capability_analysis();
+
 #include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -22,10 +26,11 @@
 #include <linux/mm.h>
 #include <linux/jhash.h>
 #include <linux/random.h>
-#include <linux/rhashtable.h>
 #include <linux/err.h>
 #include <linux/export.h>
 
+enable_capability_analysis();
+
 #define HASH_DEFAULT_SIZE	64UL
 #define HASH_MIN_SIZE		4U
 
@@ -358,6 +363,7 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
 static int rhashtable_rehash_alloc(struct rhashtable *ht,
 				   struct bucket_table *old_tbl,
 				   unsigned int size)
+	__must_hold(&ht->mutex)
 {
 	struct bucket_table *new_tbl;
 	int err;
@@ -392,6 +398,7 @@ static int rhashtable_rehash_alloc(struct rhashtable *ht,
  * bucket locks or concurrent RCU protected lookups and traversals.
  */
 static int rhashtable_shrink(struct rhashtable *ht)
+	__must_hold(&ht->mutex)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
 	unsigned int nelems = atomic_read(&ht->nelems);
@@ -724,7 +731,7 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
  * resize events and always continue.
  */
 int rhashtable_walk_start_check(struct rhashtable_iter *iter)
-	__acquires(RCU)
+	__acquires_shared(RCU)
 {
 	struct rhashtable *ht = iter->ht;
 	bool rhlist = ht->rhlist;
@@ -940,7 +947,6 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_peek);
  * hash table.
  */
 void rhashtable_walk_stop(struct rhashtable_iter *iter)
-	__releases(RCU)
 {
 	struct rhashtable *ht;
 	struct bucket_table *tbl = iter->walker.tbl;
-- 
2.48.1.502.g6dc24dfdaf-goog
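
For illustration, below is a minimal sketch (not part of the patch) of
the kind of caller the annotated lookup API now checks. struct my_obj,
my_params, and the my_lookup_bad()/my_use() helpers are hypothetical;
the sketch assumes rcu_read_lock()/rcu_read_unlock() carry the
corresponding __acquires_shared(RCU)/__releases_shared(RCU)
annotations elsewhere in this series.

#include <linux/rhashtable.h>
#include <linux/rcupdate.h>
#include <linux/printk.h>
#include <linux/types.h>

struct my_obj {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params my_params = {
	.key_len     = sizeof(u32),
	.key_offset  = offsetof(struct my_obj, key),
	.head_offset = offsetof(struct my_obj, node),
};

/* Warns: rhashtable_lookup() is __must_hold_shared(RCU), but the RCU
 * read lock is not held here. */
static struct my_obj *my_lookup_bad(struct rhashtable *ht, u32 key)
{
	return rhashtable_lookup(ht, &key, my_params);
}

/* No warning: the lookup sits inside an RCU read-side critical
 * section, and the result is only used under RCU protection. */
static void my_use(struct rhashtable *ht, u32 key)
{
	struct my_obj *obj;

	rcu_read_lock();
	obj = rhashtable_lookup(ht, &key, my_params);
	if (obj)
		pr_debug("found key %u\n", obj->key);
	rcu_read_unlock();
}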
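
Similarly, the walk API's shared acquire/release annotations mean the
analysis checks that rhashtable_walk_start() and rhashtable_walk_stop()
are balanced in callers. A sketch, reusing the hypothetical struct
my_obj from above (IS_ERR() needs <linux/err.h>):

#include <linux/err.h>

/* No warning: rhashtable_walk_start() acquires RCU (shared) and
 * rhashtable_walk_stop() releases it, so the acquire/release pair
 * balances across the iteration. */
static void my_walk(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct my_obj *obj;

	rhashtable_walk_enter(ht, &iter);
	rhashtable_walk_start(&iter);
	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj))
			continue; /* -EAGAIN: table resized, keep going */
		pr_debug("visiting key %u\n", obj->key);
	}
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}

Dropping the rhashtable_walk_stop() call (or stopping on only some
paths) would leave the shared RCU capability held at function exit,
which the analysis reports.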