Re: [PATCH v2 5/5] mm/slub: simplify __cmpxchg_double_slab() and slab_[un]lock()

On Tue, Aug 23, 2022 at 07:04:00PM +0200, Vlastimil Babka wrote:
> The PREEMPT_RT specific disabling of irqs in __cmpxchg_double_slab()
> (through slab_[un]lock()) is unnecessary as bit_spin_lock() disables
> preemption and that's sufficient on RT where interrupts are threaded.
> 
> That means we no longer need the slab_[un]lock() wrappers, so delete
> them and rename the current __slab_[un]lock() to slab_[un]lock().
>

I'm not familiar with the PREEMPT_RT preemption model, so I'm not sure I'm following.

1) Does "interrupts are threaded on RT" mean processing _most_ (all handlers
   that did not specified IRQF_NO_THREAD) of interrupts are delayed to irq threads
   and processed later in process context, and the kernel *never* use
   spinlock_t, local_lock_t that does not disable interrupts (and sleep) on RT
   in hardware/software interrupt context?
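
   IIUC the opt-out is explicit, i.e. a driver has to pass IRQF_NO_THREAD
   to keep its handler in hard irq context on RT. A minimal sketch of my
   understanding (handler/device names below are made up):

	#include <linux/interrupt.h>

	/* hypothetical handler, names are just for illustration */
	static irqreturn_t my_dev_handler(int irq, void *dev_id)
	{
		/*
		 * Because of IRQF_NO_THREAD below, this runs in hard irq
		 * context even on PREEMPT_RT; without the flag it would be
		 * forced into an irqthread and run in preemptible process
		 * context.
		 */
		return IRQ_HANDLED;
	}

	static int my_dev_setup(int irq, void *dev)
	{
		return request_irq(irq, my_dev_handler, IRQF_NO_THREAD,
				   "my_dev", dev);
	}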

2) Do we still need to disable irqs in cmpxchg_double_slab() on RT?
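
   What prompted 2): if I'm reading include/linux/bit_spinlock.h right,
   bit_spin_lock() only disables preemption and never touches the irq
   state. Roughly (simplified, SMP/DEBUG_SPINLOCK #ifdefs dropped):

	static inline void bit_spin_lock(int bitnum, unsigned long *addr)
	{
		preempt_disable();	/* note: no local_irq_save() */
		while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
			preempt_enable();
			do {
				cpu_relax();
			} while (test_bit(bitnum, addr));
			preempt_disable();
		}
	}

   So on !RT the callers must already have irqs disabled (as the comment
   in the patch says), and I wonder whether RT really never reaches this
   path from interrupt context.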

BTW, is there good documentation or a paper on the PREEMPT_RT preemption
model? I tried to find some but only came across Documentation/locking/locktypes.rst :(

Thanks!

> Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
> Acked-by: David Rientjes <rientjes@xxxxxxxxxx>
> ---
>  mm/slub.c | 39 ++++++++++++---------------------------
>  1 file changed, 12 insertions(+), 27 deletions(-)
> 
> diff --git a/mm/slub.c b/mm/slub.c
> index 0444a2ba4f12..bb8c1292d7e8 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -446,7 +446,7 @@ slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
>  /*
>   * Per slab locking using the pagelock
>   */
> -static __always_inline void __slab_lock(struct slab *slab)
> +static __always_inline void slab_lock(struct slab *slab)
>  {
>  	struct page *page = slab_page(slab);
>  
> @@ -454,7 +454,7 @@ static __always_inline void __slab_lock(struct slab *slab)
>  	bit_spin_lock(PG_locked, &page->flags);
>  }
>  
> -static __always_inline void __slab_unlock(struct slab *slab)
> +static __always_inline void slab_unlock(struct slab *slab)
>  {
>  	struct page *page = slab_page(slab);
>  
> @@ -462,24 +462,12 @@ static __always_inline void __slab_unlock(struct slab *slab)
>  	__bit_spin_unlock(PG_locked, &page->flags);
>  }
>  
> -static __always_inline void slab_lock(struct slab *slab, unsigned long *flags)
> -{
> -	if (IS_ENABLED(CONFIG_PREEMPT_RT))
> -		local_irq_save(*flags);
> -	__slab_lock(slab);
> -}
> -
> -static __always_inline void slab_unlock(struct slab *slab, unsigned long *flags)
> -{
> -	__slab_unlock(slab);
> -	if (IS_ENABLED(CONFIG_PREEMPT_RT))
> -		local_irq_restore(*flags);
> -}
> -
>  /*
>   * Interrupts must be disabled (for the fallback code to work right), typically
> - * by an _irqsave() lock variant. Except on PREEMPT_RT where locks are different
> - * so we disable interrupts as part of slab_[un]lock().
> + * by an _irqsave() lock variant. Except on PREEMPT_RT where these variants do
> + * not actually disable interrupts. On the other hand the migrate_disable()
> + * done by bit_spin_lock() is sufficient on PREEMPT_RT thanks to its threaded
> + * interrupts.
>   */
>  static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab,
>  		void *freelist_old, unsigned long counters_old,
> @@ -498,18 +486,15 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab
>  	} else
>  #endif
>  	{
> -		/* init to 0 to prevent spurious warnings */
> -		unsigned long flags = 0;
> -
> -		slab_lock(slab, &flags);
> +		slab_lock(slab);
>  		if (slab->freelist == freelist_old &&
>  					slab->counters == counters_old) {
>  			slab->freelist = freelist_new;
>  			slab->counters = counters_new;
> -			slab_unlock(slab, &flags);
> +			slab_unlock(slab);
>  			return true;
>  		}
> -		slab_unlock(slab, &flags);
> +		slab_unlock(slab);
>  	}
>  
>  	cpu_relax();
> @@ -540,16 +525,16 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab,
>  		unsigned long flags;
>  
>  		local_irq_save(flags);
> -		__slab_lock(slab);
> +		slab_lock(slab);
>  		if (slab->freelist == freelist_old &&
>  					slab->counters == counters_old) {
>  			slab->freelist = freelist_new;
>  			slab->counters = counters_new;
> -			__slab_unlock(slab);
> +			slab_unlock(slab);
>  			local_irq_restore(flags);
>  			return true;
>  		}
> -		__slab_unlock(slab);
> +		slab_unlock(slab);
>  		local_irq_restore(flags);
>  	}
>  
> -- 
> 2.37.2
> 

-- 
Thanks,
Hyeonggon



