From: Alexei Starovoitov <ast@xxxxxxxxxx>

In !PREEMPT_RT local_lock_irqsave() disables interrupts to protect the
critical section, but it doesn't prevent NMIs, so fully reentrant code
cannot use local_lock_irqsave() for exclusive access.

Introduce local_trylock_t and local_trylock_irqsave(), which disables
interrupts and sets active=1, so that a local_trylock_irqsave() from an
NMI on the same lock will return false.

In PREEMPT_RT local_lock_irqsave() maps to a preemptible spin_lock().
Map local_trylock_irqsave() to a preemptible spin_trylock(). When in
hard IRQ or NMI, return false right away, since spin_trylock() is not
safe there due to PI issues.

Note there is no need to use local_inc for the 'active' variable, since
it's a percpu variable with strict nesting scopes.

Usage:

local_lock_t lock;                     // sizeof(lock) == 0 in !RT
local_lock_irqsave(&lock, ...);        // irqsave as before
if (local_trylock_irqsave(&lock, ...)) // compilation error

local_trylock_t lock;                  // sizeof(lock) == 4 in !RT
local_lock_irqsave(&lock, ...);        // irqsave and active = 1
if (local_trylock_irqsave(&lock, ...)) // if (!active) irqsave

Signed-off-by: Alexei Starovoitov <ast@xxxxxxxxxx>
---
 include/linux/local_lock.h          |  9 ++++
 include/linux/local_lock_internal.h | 79 ++++++++++++++++++++++++++++-
 2 files changed, 86 insertions(+), 2 deletions(-)

diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
index 091dc0b6bdfb..f4bc3e9b2b20 100644
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
@@ -30,6 +30,15 @@
 #define local_lock_irqsave(lock, flags)				\
 	__local_lock_irqsave(lock, flags)
 
+/**
+ * local_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
+ *			   interrupts. Fails in PREEMPT_RT when in hard IRQ or NMI.
+ * @lock:	The lock variable
+ * @flags:	Storage for interrupt flags
+ */
+#define local_trylock_irqsave(lock, flags)			\
+	__local_trylock_irqsave(lock, flags)
+
 /**
  * local_unlock - Release a per CPU local lock
  * @lock:	The lock variable
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 8dd71fbbb6d2..14757b7aea99 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -15,6 +15,19 @@ typedef struct {
 #endif
 } local_lock_t;
 
+typedef struct {
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map	dep_map;
+	struct task_struct	*owner;
+#endif
+	/*
+	 * Same layout as local_lock_t with 'active' field
+	 * at the end, since (local_trylock_t *) will be
+	 * casted to (local_lock_t *).
+	 */
+	int	active;
+} local_trylock_t;
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define LOCAL_LOCK_DEBUG_INIT(lockname)		\
 	.dep_map = {					\
@@ -31,6 +44,13 @@ static inline void local_lock_acquire(local_lock_t *l)
 	l->owner = current;
 }
 
+static inline void local_trylock_acquire(local_lock_t *l)
+{
+	lock_map_acquire_try(&l->dep_map);
+	DEBUG_LOCKS_WARN_ON(l->owner);
+	l->owner = current;
+}
+
 static inline void local_lock_release(local_lock_t *l)
 {
 	DEBUG_LOCKS_WARN_ON(l->owner != current);
@@ -45,6 +65,7 @@ static inline void local_lock_debug_init(local_lock_t *l)
 #else /* CONFIG_DEBUG_LOCK_ALLOC */
 # define LOCAL_LOCK_DEBUG_INIT(lockname)
 static inline void local_lock_acquire(local_lock_t *l) { }
+static inline void local_trylock_acquire(local_lock_t *l) { }
 static inline void local_lock_release(local_lock_t *l) { }
 static inline void local_lock_debug_init(local_lock_t *l) { }
 #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
@@ -87,10 +108,37 @@ do { \
 
 #define __local_lock_irqsave(lock, flags)			\
 	do {							\
+		local_trylock_t *tl;				\
+		local_lock_t *l;				\
 		local_irq_save(flags);				\
-		local_lock_acquire(this_cpu_ptr(lock));		\
+		l = (local_lock_t *)this_cpu_ptr(lock);		\
+		tl = (local_trylock_t *)l;			\
+		_Generic((lock),				\
+			local_trylock_t *: ({			\
+				lockdep_assert(tl->active == 0);\
+				WRITE_ONCE(tl->active, 1);	\
+			}),					\
+			default:(void)0);			\
+		local_lock_acquire(l);				\
 	} while (0)
 
+
+#define __local_trylock_irqsave(lock, flags)			\
+	({							\
+		local_trylock_t *tl;				\
+		local_irq_save(flags);				\
+		tl = this_cpu_ptr(lock);			\
+		if (READ_ONCE(tl->active) == 1) {		\
+			local_irq_restore(flags);		\
+			tl = NULL;				\
+		} else {					\
+			WRITE_ONCE(tl->active, 1);		\
+			local_trylock_acquire(			\
+				(local_lock_t *)tl);		\
+		}						\
+		!!tl;						\
+	})
+
 #define __local_unlock(lock)					\
 	do {							\
 		local_lock_release(this_cpu_ptr(lock));		\
@@ -105,7 +153,17 @@ do { \
 
 #define __local_unlock_irqrestore(lock, flags)			\
 	do {							\
-		local_lock_release(this_cpu_ptr(lock));		\
+		local_trylock_t *tl;				\
+		local_lock_t *l;				\
+		l = (local_lock_t *)this_cpu_ptr(lock);		\
+		tl = (local_trylock_t *)l;			\
+		_Generic((lock),				\
+			local_trylock_t *: ({			\
+				lockdep_assert(tl->active == 1);\
+				WRITE_ONCE(tl->active, 0);	\
+			}),					\
+			default:(void)0);			\
+		local_lock_release(l);				\
 		local_irq_restore(flags);			\
 	} while (0)
 
@@ -125,6 +183,7 @@ do { \
  * critical section while staying preemptible.
  */
 typedef spinlock_t local_lock_t;
+typedef spinlock_t local_trylock_t;
 
 #define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
 
@@ -148,6 +207,22 @@ typedef spinlock_t local_lock_t;
 		__local_lock(lock);				\
 	} while (0)
 
+#define __local_trylock_irqsave(lock, flags)			\
+	({							\
+		__label__ out;					\
+		int ret = 0;					\
+		typecheck(unsigned long, flags);		\
+		flags = 0;					\
+		if (in_nmi() || in_hardirq())			\
+			goto out;				\
+		migrate_disable();				\
+		ret = spin_trylock(this_cpu_ptr((lock)));	\
+		if (!ret)					\
+			migrate_enable();			\
+	out:							\
+		ret;						\
+	})
+
 #define __local_unlock(__lock)					\
 	do {							\
 		spin_unlock(this_cpu_ptr((__lock)));		\
-- 
2.43.5
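
Below is a hypothetical caller sketch, not part of the patch above, showing
the intended pattern: a per-CPU structure protected by a local_trylock_t,
with an unconditional local_lock_irqsave() path for regular contexts and a
local_trylock_irqsave() path for reentrant (e.g. NMI) contexts. The names
my_stats, my_stats_add and my_stats_add_nmi are made up, and it assumes
INIT_LOCAL_LOCK() is reused to initialize a local_trylock_t, since the patch
does not add a dedicated initializer.

/* Hypothetical usage sketch, not part of this patch. */
#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/local_lock.h>

struct my_stats {
	local_trylock_t	lock;	/* 4 bytes in !RT, spinlock_t in RT */
	u64		events;
};

static DEFINE_PER_CPU(struct my_stats, my_stats) = {
	/* Assumption: reuse INIT_LOCAL_LOCK(); no trylock-specific initializer exists. */
	.lock = INIT_LOCAL_LOCK(lock),
};

/* Regular task/softirq context: unconditional acquire, same as with local_lock_t. */
static void my_stats_add(u64 n)
{
	unsigned long flags;

	/* In !RT this disables interrupts and also sets active = 1. */
	local_lock_irqsave(&my_stats.lock, flags);
	__this_cpu_add(my_stats.events, n);
	local_unlock_irqrestore(&my_stats.lock, flags);
}

/* Reentrant context (e.g. NMI): must not deadlock against the same CPU. */
static bool my_stats_add_nmi(u64 n)
{
	unsigned long flags;

	/*
	 * Fails if this CPU already holds the lock (active == 1),
	 * or on PREEMPT_RT when called from hard IRQ or NMI.
	 */
	if (!local_trylock_irqsave(&my_stats.lock, flags))
		return false;

	__this_cpu_add(my_stats.events, n);
	local_unlock_irqrestore(&my_stats.lock, flags);
	return true;
}

In the !RT case the fast path is just local_irq_save() plus a plain store to
the percpu 'active' word, so the NMI path can detect reentrancy without any
atomics, matching the "strict nesting scopes" argument in the changelog.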