Looks like future sh variants will support a 4-byte cas instruction,
which will be used to implement 1- and 2-byte xchg. This is exactly
what we do for llsc now; move the portable part of the code into a
separate header so it's easy to reuse.

Suggested-by: Rich Felker <dalias@xxxxxxxx>
Signed-off-by: Michael S. Tsirkin <mst@xxxxxxxxxx>
---
 arch/sh/include/asm/cmpxchg-llsc.h | 35 +-------------------------
 arch/sh/include/asm/cmpxchg-xchg.h | 51 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 52 insertions(+), 34 deletions(-)
 create mode 100644 arch/sh/include/asm/cmpxchg-xchg.h

diff --git a/arch/sh/include/asm/cmpxchg-llsc.h b/arch/sh/include/asm/cmpxchg-llsc.h
index e754794..fcfd322 100644
--- a/arch/sh/include/asm/cmpxchg-llsc.h
+++ b/arch/sh/include/asm/cmpxchg-llsc.h
@@ -1,9 +1,6 @@
 #ifndef __ASM_SH_CMPXCHG_LLSC_H
 #define __ASM_SH_CMPXCHG_LLSC_H
 
-#include <linux/bitops.h>
-#include <asm/byteorder.h>
-
 static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
 {
 	unsigned long retval;
@@ -50,36 +47,6 @@ __cmpxchg_u32(volatile u32 *m, unsigned long old, unsigned long new)
 	return retval;
 }
 
-static inline u32 __xchg_cmpxchg(volatile void *ptr, u32 x, int size)
-{
-	int off = (unsigned long)ptr % sizeof(u32);
-	volatile u32 *p = ptr - off;
-#ifdef __BIG_ENDIAN
-	int bitoff = (sizeof(u32) - 1 - off) * BITS_PER_BYTE;
-#else
-	int bitoff = off * BITS_PER_BYTE;
-#endif
-	u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
-	u32 oldv, newv;
-	u32 ret;
-
-	do {
-		oldv = READ_ONCE(*p);
-		ret = (oldv & bitmask) >> bitoff;
-		newv = (oldv & ~bitmask) | (x << bitoff);
-	} while (__cmpxchg_u32(p, oldv, newv) != oldv);
-
-	return ret;
-}
-
-static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
-{
-	return __xchg_cmpxchg(m, val, sizeof *m);
-}
-
-static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
-{
-	return __xchg_cmpxchg(m, val, sizeof *m);
-}
+#include <asm/cmpxchg-xchg.h>
 
 #endif /* __ASM_SH_CMPXCHG_LLSC_H */
diff --git a/arch/sh/include/asm/cmpxchg-xchg.h b/arch/sh/include/asm/cmpxchg-xchg.h
new file mode 100644
index 0000000..7219719
--- /dev/null
+++ b/arch/sh/include/asm/cmpxchg-xchg.h
@@ -0,0 +1,51 @@
+#ifndef __ASM_SH_CMPXCHG_XCHG_H
+#define __ASM_SH_CMPXCHG_XCHG_H
+
+/*
+ * Copyright (C) 2016 Red Hat, Inc.
+ * Author: Michael S. Tsirkin <mst@xxxxxxxxxx>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See the
+ * file "COPYING" in the main directory of this archive for more details.
+ */
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
+
+/*
+ * Portable implementations of 1 and 2 byte xchg using a 4 byte cmpxchg.
+ * Note: this header isn't self-contained: __cmpxchg_u32 must be defined
+ * before it is included.
+ */
+static inline u32 __xchg_cmpxchg(volatile void *ptr, u32 x, int size)
+{
+	int off = (unsigned long)ptr % sizeof(u32);
+	volatile u32 *p = ptr - off;
+#ifdef __BIG_ENDIAN
+	int bitoff = (sizeof(u32) - 1 - off) * BITS_PER_BYTE;
+#else
+	int bitoff = off * BITS_PER_BYTE;
+#endif
+	u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
+	u32 oldv, newv;
+	u32 ret;
+
+	do {
+		oldv = READ_ONCE(*p);
+		ret = (oldv & bitmask) >> bitoff;
+		newv = (oldv & ~bitmask) | (x << bitoff);
+	} while (__cmpxchg_u32(p, oldv, newv) != oldv);
+
+	return ret;
+}
+
+static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val)
+{
+	return __xchg_cmpxchg(m, val, sizeof *m);
+}
+
+static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
+{
+	return __xchg_cmpxchg(m, val, sizeof *m);
+}
+
+#endif /* __ASM_SH_CMPXCHG_XCHG_H */
-- 
MST
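
P.S. To see the trick in isolation, here is a minimal standalone sketch of
the same sub-word-exchange-via-32-bit-cmpxchg idea. It is byte-only to keep
it short, and it substitutes the GCC/Clang __atomic builtins for the
kernel's __cmpxchg_u32() and READ_ONCE(), so the names and environment
below are illustrative assumptions rather than kernel code; it compiles as
plain C11 userspace code.

/* demo.c -- sketch of a 1-byte xchg built on a 4-byte compare-exchange.
 * Build and run with e.g.:  cc -std=c11 demo.c && ./a.out
 */
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's __cmpxchg_u32(): store new to *p only if
 * *p == old; either way, return the value that was found there. */
static uint32_t cmpxchg_u32(volatile uint32_t *p, uint32_t old, uint32_t new)
{
	uint32_t expected = old;

	__atomic_compare_exchange_n(p, &expected, new, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return expected;
}

/* Same shape as __xchg_cmpxchg() in the patch, specialised to one byte:
 * align down to the containing 32-bit word, find the byte's bit offset
 * (endian-dependent), then retry a masked read-modify-write until the
 * word-sized compare-exchange succeeds. */
static uint8_t xchg_u8(volatile uint8_t *ptr, uint8_t x)
{
	uintptr_t off = (uintptr_t)ptr % sizeof(uint32_t);
	volatile uint32_t *p = (volatile uint32_t *)((uintptr_t)ptr - off);
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	int bitoff = (sizeof(uint32_t) - 1 - off) * 8;
#else
	int bitoff = off * 8;
#endif
	uint32_t bitmask = 0xffu << bitoff;
	uint32_t oldv, newv;

	do {
		oldv = __atomic_load_n(p, __ATOMIC_RELAXED); /* ~READ_ONCE */
		newv = (oldv & ~bitmask) | ((uint32_t)x << bitoff);
	} while (cmpxchg_u32(p, oldv, newv) != oldv);

	/* oldv is the word the successful compare-exchange replaced. */
	return (oldv & bitmask) >> bitoff;
}

int main(void)
{
	/* A 4-byte-aligned buffer; exchange the byte at offset 2. */
	_Alignas(uint32_t) volatile uint8_t buf[4] = { 0x11, 0x22, 0x33, 0x44 };
	uint8_t old = xchg_u8(&buf[2], 0xaa);

	/* Expect old=0x33 with the neighbouring bytes untouched. */
	printf("old=%#x buf=%#x %#x %#x %#x\n", old,
	       buf[0], buf[1], buf[2], buf[3]);
	return 0;
}

The retry loop is what makes the neighbouring bytes safe: if another CPU
changes any byte of the containing word between the load and the
compare-exchange, the compare-exchange fails and the read-modify-write is
redone.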
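And for the reuse the first paragraph anticipates: a cas-based variant
would only need to supply __cmpxchg_u32() and xchg_u32() before pulling in
the shared header. The fragment below is a hypothetical sketch of such an
asm/cmpxchg-cas.h -- the cas.l mnemonic, operand order and register
constraints are assumptions about the future sh/J2 instruction, not a
tested implementation:

#ifndef __ASM_SH_CMPXCHG_CAS_H
#define __ASM_SH_CMPXCHG_CAS_H

static inline unsigned long
__cmpxchg_u32(volatile u32 *m, unsigned long old, unsigned long new)
{
	/* Assumed semantics: if (*r0 == old) *r0 = new; new = old *r0 */
	__asm__ __volatile__(
		"cas.l %1, %0, @r0"
		: "+r" (new)
		: "r" (old), "z" (m)
		: "t", "memory");
	return new;
}

static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
{
	unsigned long old;

	do
		old = *m;
	while (__cmpxchg_u32(m, old, val) != old);
	return old;
}

/* 1- and 2-byte xchg fall out of the header this patch factors out. */
#include <asm/cmpxchg-xchg.h>

#endif /* __ASM_SH_CMPXCHG_CAS_H */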