Re: [PATCH bpf-next v2 3/6] locking/local_lock: Introduce local_trylock_irqsave()

On 12/10/24 03:39, Alexei Starovoitov wrote:
> From: Alexei Starovoitov <ast@xxxxxxxxxx>
> 
> Similar to local_lock_irqsave(), introduce local_trylock_irqsave().
> It uses spin_trylock() on PREEMPT_RT and always succeeds when !RT.

Hmm, but is it correct to always succeed? If we're in an NMI, we might be
preempting an existing local_(try)lock_irqsave() critical section, because
disabling irqs doesn't stop NMIs, right?
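
Something like this is what I mean (a hypothetical !RT caller, names made up
just to illustrate the concern, not from this series):

#include <linux/local_lock.h>
#include <linux/percpu.h>

struct obj_pool {
	local_lock_t lock;
	unsigned int nr;
};
static DEFINE_PER_CPU(struct obj_pool, obj_pool) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void task_path(void)
{
	unsigned long flags;

	local_lock_irqsave(&obj_pool.lock, flags);
	/* irqs are off, but an NMI can still fire right here */
	__this_cpu_inc(obj_pool.nr);
	local_unlock_irqrestore(&obj_pool.lock, flags);
}

/* called from an NMI handler */
static void nmi_path(void)
{
	unsigned long flags;

	/*
	 * On !RT __local_trylock_irqsave() unconditionally returns 1, even
	 * if this NMI interrupted task_path() while it held the lock on
	 * this CPU, so we'd re-enter the critical section.
	 */
	if (local_trylock_irqsave(&obj_pool.lock, flags)) {
		__this_cpu_inc(obj_pool.nr);
		local_unlock_irqrestore(&obj_pool.lock, flags);
	}
}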

> Signed-off-by: Alexei Starovoitov <ast@xxxxxxxxxx>
> ---
>  include/linux/local_lock.h          |  9 +++++++++
>  include/linux/local_lock_internal.h | 23 +++++++++++++++++++++++
>  2 files changed, 32 insertions(+)
> 
> diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
> index 091dc0b6bdfb..6880c29b8b98 100644
> --- a/include/linux/local_lock.h
> +++ b/include/linux/local_lock.h
> @@ -30,6 +30,15 @@
>  #define local_lock_irqsave(lock, flags)				\
>  	__local_lock_irqsave(lock, flags)
>  
> +/**
> + * local_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
> + *			   interrupts. Always succeeds in !PREEMPT_RT.
> + * @lock:	The lock variable
> + * @flags:	Storage for interrupt flags
> + */
> +#define local_trylock_irqsave(lock, flags)			\
> +	__local_trylock_irqsave(lock, flags)
> +
>  /**
>   * local_unlock - Release a per CPU local lock
>   * @lock:	The lock variable
> diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
> index 8dd71fbbb6d2..2c0f8a49c2d0 100644
> --- a/include/linux/local_lock_internal.h
> +++ b/include/linux/local_lock_internal.h
> @@ -31,6 +31,13 @@ static inline void local_lock_acquire(local_lock_t *l)
>  	l->owner = current;
>  }
>  
> +static inline void local_trylock_acquire(local_lock_t *l)
> +{
> +	lock_map_acquire_try(&l->dep_map);
> +	DEBUG_LOCKS_WARN_ON(l->owner);
> +	l->owner = current;
> +}
> +
>  static inline void local_lock_release(local_lock_t *l)
>  {
>  	DEBUG_LOCKS_WARN_ON(l->owner != current);
> @@ -45,6 +52,7 @@ static inline void local_lock_debug_init(local_lock_t *l)
>  #else /* CONFIG_DEBUG_LOCK_ALLOC */
>  # define LOCAL_LOCK_DEBUG_INIT(lockname)
>  static inline void local_lock_acquire(local_lock_t *l) { }
> +static inline void local_trylock_acquire(local_lock_t *l) { }
>  static inline void local_lock_release(local_lock_t *l) { }
>  static inline void local_lock_debug_init(local_lock_t *l) { }
>  #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
> @@ -91,6 +99,13 @@ do {								\
>  		local_lock_acquire(this_cpu_ptr(lock));		\
>  	} while (0)
>  
> +#define __local_trylock_irqsave(lock, flags)			\
> +	({							\
> +		local_irq_save(flags);				\
> +		local_trylock_acquire(this_cpu_ptr(lock));	\
> +		1;						\
> +	})
> +
>  #define __local_unlock(lock)					\
>  	do {							\
>  		local_lock_release(this_cpu_ptr(lock));		\
> @@ -148,6 +163,14 @@ typedef spinlock_t local_lock_t;
>  		__local_lock(lock);				\
>  	} while (0)
>  
> +#define __local_trylock_irqsave(lock, flags)			\
> +	({							\
> +		typecheck(unsigned long, flags);		\
> +		flags = 0;					\
> +		migrate_disable();				\
> +		spin_trylock(this_cpu_ptr((lock)));		\
> +	})
> +
>  #define __local_unlock(__lock)					\
>  	do {							\
>  		spin_unlock(this_cpu_ptr((__lock)));		\
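
And just to confirm I read the RT side right: with spin_trylock() the caller
has to handle failure, so usage would be roughly (again my own sketch, reusing
the obj_pool example from above):

static bool obj_pool_try_inc(void)
{
	unsigned long flags;

	/* !RT: always 1; RT: 0 if this CPU's obj_pool.lock is already held */
	if (!local_trylock_irqsave(&obj_pool.lock, flags))
		return false;
	__this_cpu_inc(obj_pool.nr);
	local_unlock_irqrestore(&obj_pool.lock, flags);
	return true;
}

which is why the unconditional "1" on !RT is the part I'm unsure about.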
