Re: [PATCH] parisc: Prevent optimization of loads and stores in atomic operations

On 13.06.20 17:20, John David Anglin wrote:
> Stalls are quite frequent with recent kernels.  When the stall is detected by rcu_sched, we
> get a backtrace similar to the following:
...
> The fix is to use a volatile pointer for the accesses in spinlocks.  This prevents gcc from
> optimizing the accesses.

Very nice finding!
If this really fixes the stalls we have seen in the past, then that's a BIG step forward!
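
Just to illustrate the effect (a minimal sketch, not part of the patch;
the function name is made up): accesses through a volatile-qualified
pointer must be emitted exactly as written, so gcc can no longer keep
the value in a register or drop/merge the load and the store:

	/* sketch: force the load and the store to actually happen */
	static unsigned long xchg_sketch(unsigned long x, unsigned long *ptr)
	{
		volatile unsigned long *p = ptr;
		unsigned long temp;

		temp = *p;	/* load is emitted here, not hoisted or cached */
		*p = x;		/* store is emitted here, not elided */
		return temp;
	}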

...
> diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
> index 70ffbcf889b8..5904966ca957 100644
> --- a/arch/parisc/lib/bitops.c
> +++ b/arch/parisc/lib/bitops.c
> @@ -21,11 +21,12 @@ arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
>  unsigned long __xchg64(unsigned long x, unsigned long *ptr)
>  {
>  	unsigned long temp, flags;
> +	volatile unsigned long *p = ptr;

Regarding your changes to bitops.c, can't we instead
simply tag the parameter ptr with "volatile", e.g.
 unsigned long __xchg64(unsigned long x, volatile unsigned long *ptr)
 unsigned long __xchg32(int x, volatile int *ptr)
 unsigned long __xchg8(char x, volatile char *ptr)
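
For __xchg64 that would look roughly like this (a sketch only, based on
the existing function body; the matching extern declarations would
presumably need the same qualifier):

	unsigned long __xchg64(unsigned long x, volatile unsigned long *ptr)
	{
		unsigned long temp, flags;

		_atomic_spin_lock_irqsave(ptr, flags);
		temp = *ptr;	/* load through the volatile parameter */
		*ptr = x;	/* store through the volatile parameter */
		_atomic_spin_unlock_irqrestore(ptr, flags);
		return temp;
	}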

Helge


>
> -	_atomic_spin_lock_irqsave(ptr, flags);
> -	temp = *ptr;
> -	*ptr = x;
> -	_atomic_spin_unlock_irqrestore(ptr, flags);
> +	_atomic_spin_lock_irqsave(p, flags);
> +	temp = *p;
> +	*p = x;
> +	_atomic_spin_unlock_irqrestore(p, flags);
>  	return temp;
>  }
>  #endif
> @@ -33,12 +34,13 @@ unsigned long __xchg64(unsigned long x, unsigned long *ptr)
>  unsigned long __xchg32(int x, int *ptr)
>  {
>  	unsigned long flags;
> +	volatile int *p = ptr;
>  	long temp;
>
> -	_atomic_spin_lock_irqsave(ptr, flags);
> -	temp = (long) *ptr;	/* XXX - sign extension wanted? */
> -	*ptr = x;
> -	_atomic_spin_unlock_irqrestore(ptr, flags);
> +	_atomic_spin_lock_irqsave(p, flags);
> +	temp = (long) *p;	/* XXX - sign extension wanted? */
> +	*p = x;
> +	_atomic_spin_unlock_irqrestore(p, flags);
>  	return (unsigned long)temp;
>  }
>
> @@ -46,12 +48,13 @@ unsigned long __xchg32(int x, int *ptr)
>  unsigned long __xchg8(char x, char *ptr)
>  {
>  	unsigned long flags;
> +	volatile char *p = ptr;
>  	long temp;
>
> -	_atomic_spin_lock_irqsave(ptr, flags);
> -	temp = (long) *ptr;	/* XXX - sign extension wanted? */
> -	*ptr = x;
> -	_atomic_spin_unlock_irqrestore(ptr, flags);
> +	_atomic_spin_lock_irqsave(p, flags);
> +	temp = (long) *p;	/* XXX - sign extension wanted? */
> +	*p = x;
> +	_atomic_spin_unlock_irqrestore(p, flags);
>  	return (unsigned long)temp;
>  }
>




