The following commit has been merged into the locking/core branch of tip:

Commit-ID:     31552385f8e9d0869117014bf8e55ba0497e3ec8
Gitweb:        https://git.kernel.org/tip/31552385f8e9d0869117014bf8e55ba0497e3ec8
Author:        Thomas Gleixner <tglx@xxxxxxxxxxxxx>
AuthorDate:    Sun, 15 Aug 2021 23:29:27 +02:00
Committer:     Ingo Molnar <mingo@xxxxxxxxxx>
CommitterDate: Tue, 17 Aug 2021 19:06:13 +02:00

locking/spinlock/rt: Prepare for RT local_lock

Add the static and runtime initializer mechanics to support the RT variant
of local_lock, which requires the lock type in the lockdep map to be set
to LD_LOCK_PERCPU.

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
Link: https://lore.kernel.org/r/20210815211305.967526724@xxxxxxxxxxxxx
---
 include/linux/spinlock_rt.h        | 24 ++++++++++++++++--------
 include/linux/spinlock_types.h     |  6 ++++++
 include/linux/spinlock_types_raw.h |  8 ++++++++
 kernel/locking/spinlock_rt.c       |  7 +++++--
 4 files changed, 35 insertions(+), 10 deletions(-)

diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
index 4fc7219..835aeda 100644
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -8,20 +8,28 @@

 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
-				struct lock_class_key *key);
+				struct lock_class_key *key, bool percpu);
 #else
 static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
-				struct lock_class_key *key)
+				struct lock_class_key *key, bool percpu)
 {
 }
 #endif

-#define spin_lock_init(slock)				\
-do {							\
-	static struct lock_class_key __key;		\
-							\
-	rt_mutex_base_init(&(slock)->lock);		\
-	__rt_spin_lock_init(slock, #slock, &__key);	\
+#define spin_lock_init(slock)					\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	rt_mutex_base_init(&(slock)->lock);			\
+	__rt_spin_lock_init(slock, #slock, &__key, false);	\
+} while (0)
+
+#define local_spin_lock_init(slock)				\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	rt_mutex_base_init(&(slock)->lock);			\
+	__rt_spin_lock_init(slock, #slock, &__key, true);	\
 } while (0)

 extern void rt_spin_lock(spinlock_t *lock);
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 8a9aadb..2dfa35f 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -60,6 +60,12 @@ typedef struct spinlock {
 		SPIN_DEP_MAP_INIT(name)				\
 	}

+#define __LOCAL_SPIN_LOCK_UNLOCKED(name)			\
+	{							\
+		.lock = __RT_MUTEX_BASE_INITIALIZER(name.lock),	\
+		LOCAL_SPIN_DEP_MAP_INIT(name)			\
+	}
+
 #define DEFINE_SPINLOCK(name)					\
 	spinlock_t name = __SPIN_LOCK_UNLOCKED(name)

diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h
index a8a4330..91cb36b 100644
--- a/include/linux/spinlock_types_raw.h
+++ b/include/linux/spinlock_types_raw.h
@@ -37,9 +37,17 @@ typedef struct raw_spinlock {
 		.name = #lockname,			\
 		.wait_type_inner = LD_WAIT_CONFIG,	\
 	}
+
+# define LOCAL_SPIN_DEP_MAP_INIT(lockname)		\
+	.dep_map = {					\
+		.name = #lockname,			\
+		.wait_type_inner = LD_WAIT_CONFIG,	\
+		.lock_type = LD_LOCK_PERCPU,		\
+	}
 #else
 # define RAW_SPIN_DEP_MAP_INIT(lockname)
 # define SPIN_DEP_MAP_INIT(lockname)
+# define LOCAL_SPIN_DEP_MAP_INIT(lockname)
 #endif

 #ifdef CONFIG_DEBUG_SPINLOCK
diff --git a/kernel/locking/spinlock_rt.c b/kernel/locking/spinlock_rt.c
index c36648b..d2912e4 100644
--- a/kernel/locking/spinlock_rt.c
+++ b/kernel/locking/spinlock_rt.c
@@ -120,10 +120,13 @@ EXPORT_SYMBOL(rt_spin_trylock_bh);

 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void __rt_spin_lock_init(spinlock_t *lock, const char *name,
-			 struct lock_class_key *key)
+			 struct lock_class_key *key, bool percpu)
 {
+	u8 type = percpu ? LD_LOCK_PERCPU : LD_LOCK_NORMAL;
+
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
-	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG);
+	lockdep_init_map_type(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG,
+			      LD_WAIT_INV, type);
 }
 EXPORT_SYMBOL(__rt_spin_lock_init);
 #endif
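For context, a minimal sketch of how a follow-up RT local_lock conversion could
build on these hooks. The local_lock_t typedef, the INIT_LOCAL_LOCK() /
local_lock_init() names and the example_lock usage below are illustrative
assumptions, not something this patch introduces:

	/* Illustrative sketch only -- not part of this commit. */

	/* On PREEMPT_RT, a local_lock could be backed by a spinlock_t. */
	typedef spinlock_t local_lock_t;

	/*
	 * Static initializer: __LOCAL_SPIN_LOCK_UNLOCKED() pulls in
	 * LOCAL_SPIN_DEP_MAP_INIT(), so the lockdep map is created with
	 * .lock_type = LD_LOCK_PERCPU.
	 */
	#define INIT_LOCAL_LOCK(lockname)	__LOCAL_SPIN_LOCK_UNLOCKED((lockname))

	/*
	 * Runtime initializer: local_spin_lock_init() passes percpu=true to
	 * __rt_spin_lock_init(), which then calls lockdep_init_map_type()
	 * with LD_LOCK_PERCPU instead of LD_LOCK_NORMAL.
	 */
	#define local_lock_init(l)		local_spin_lock_init(l)

	/* Hypothetical usage of the static initializer: */
	static DEFINE_PER_CPU(local_lock_t, example_lock) = INIT_LOCAL_LOCK(example_lock);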