Some 64-bit atomic operations use 'long long' as the operand/return type (e.g. asm-generic/atomic64.h, arch/x86/include/asm/atomic64_32.h), while others use 'long' (e.g. arch/x86/include/asm/atomic64_64.h). This makes it impossible to write portable code: for example, there is no format specifier that prints the result of atomic64_read() without warnings on every arch, and atomic64_try_cmpxchg() is almost impossible to use portably because it requires either a 'long *' or a 'long long *' argument depending on the arch.

Switch arch/x86/include/asm/atomic64_64.h, and the atomic64_t counter in include/linux/types.h, to 'long long'.

Signed-off-by: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Mark Rutland <mark.rutland@xxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Will Deacon <will.deacon@xxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: kasan-dev@xxxxxxxxxxxxxxxx
Cc: linux-mm@xxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
Cc: x86@xxxxxxxxxx
---
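For illustration only (not part of the patch; the names cnt, report_cnt and bump_cnt_if_unchanged are invented for this note), here is a sketch of a caller that cannot currently be written warning-free on all arches with the mixed types:

#include <linux/atomic.h>
#include <linux/printk.h>
#include <linux/types.h>

static atomic64_t cnt = ATOMIC64_INIT(0);

static void report_cnt(void)
{
	/* Warns wherever atomic64_read() returns 'long long'. */
	pr_info("cnt=%ld\n", atomic64_read(&cnt));
	/* Warns wherever atomic64_read() returns 'long' (x86-64 today). */
	pr_info("cnt=%lld\n", atomic64_read(&cnt));
}

static bool bump_cnt_if_unchanged(s64 expected)
{
	/*
	 * 'old' must match whichever pointer type the arch's
	 * atomic64_try_cmpxchg() prototype uses ('long *' on x86-64
	 * today, 'long long *' elsewhere), so no single type compiles
	 * cleanly everywhere.
	 */
	long long old = expected;

	return atomic64_try_cmpxchg(&cnt, &old, old + 1);
}

The only workaround today is to cast (or copy into a differently typed local) at every such call site; making all implementations agree on 'long long' removes that need.
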
 arch/x86/include/asm/atomic64_64.h | 54 +++++++++++++++++++-------------------
 include/linux/types.h              |  2 +-
 2 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 8db8879a6d8c..a62982a2b534 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -16,7 +16,7 @@
  * Atomically reads the value of @v.
  * Doesn't imply a read memory barrier.
  */
-static inline long atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
 	return READ_ONCE((v)->counter);
 }
@@ -28,7 +28,7 @@ static inline long atomic64_read(const atomic64_t *v)
  *
  * Atomically sets the value of @v to @i.
  */
-static inline void atomic64_set(atomic64_t *v, long i)
+static inline void atomic64_set(atomic64_t *v, long long i)
 {
 	WRITE_ONCE(v->counter, i);
 }
@@ -40,7 +40,7 @@
  *
  * Atomically adds @i to @v.
  */
-static __always_inline void atomic64_add(long i, atomic64_t *v)
+static __always_inline void atomic64_add(long long i, atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "addq %1,%0"
 		     : "=m" (v->counter)
@@ -54,7 +54,7 @@ static __always_inline void atomic64_add(long i, atomic64_t *v)
  *
  * Atomically subtracts @i from @v.
  */
-static inline void atomic64_sub(long i, atomic64_t *v)
+static inline void atomic64_sub(long long i, atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "subq %1,%0"
 		     : "=m" (v->counter)
@@ -70,7 +70,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
  * true if the result is zero, or false for all
  * other cases.
  */
-static inline bool atomic64_sub_and_test(long i, atomic64_t *v)
+static inline bool atomic64_sub_and_test(long long i, atomic64_t *v)
 {
 	GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
 }
@@ -136,7 +136,7 @@ static inline bool atomic64_inc_and_test(atomic64_t *v)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static inline bool atomic64_add_negative(long i, atomic64_t *v)
+static inline bool atomic64_add_negative(long long i, atomic64_t *v)
 {
 	GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
 }
@@ -148,22 +148,22 @@ static inline bool atomic64_add_negative(long i, atomic64_t *v)
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static __always_inline long atomic64_add_return(long i, atomic64_t *v)
+static __always_inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
 	return i + xadd(&v->counter, i);
 }
 
-static inline long atomic64_sub_return(long i, atomic64_t *v)
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 {
 	return atomic64_add_return(-i, v);
 }
 
-static inline long atomic64_fetch_add(long i, atomic64_t *v)
+static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
 {
 	return xadd(&v->counter, i);
 }
 
-static inline long atomic64_fetch_sub(long i, atomic64_t *v)
+static inline long long atomic64_fetch_sub(long long i, atomic64_t *v)
 {
 	return xadd(&v->counter, -i);
 }
@@ -171,18 +171,18 @@ static inline long atomic64_fetch_sub(long i, atomic64_t *v)
 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
 
-static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+static inline long long atomic64_cmpxchg(atomic64_t *v, long long old, long long new)
 {
 	return cmpxchg(&v->counter, old, new);
 }
 
 #define atomic64_try_cmpxchg atomic64_try_cmpxchg
-static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, long *old, long new)
+static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, long long *old, long long new)
 {
 	return try_cmpxchg(&v->counter, old, new);
 }
 
-static inline long atomic64_xchg(atomic64_t *v, long new)
+static inline long long atomic64_xchg(atomic64_t *v, long long new)
 {
 	return xchg(&v->counter, new);
 }
@@ -193,12 +193,12 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
- * Atomically adds @a to @v, so long as it was not @u.
+ * Atomically adds @a to @v, so long long as it was not @u.
  * Returns the old value of @v.
  */
-static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
+static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
-	long c = atomic64_read(v);
+	long long c = atomic64_read(v);
 	do {
 		if (unlikely(c == u))
 			return false;
@@ -215,9 +215,9 @@ static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
  * The function returns the old value of *v minus 1, even if
  * the atomic variable, v, was not decremented.
  */
-static inline long atomic64_dec_if_positive(atomic64_t *v)
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
-	long dec, c = atomic64_read(v);
+	long long dec, c = atomic64_read(v);
 	do {
 		dec = c - 1;
 		if (unlikely(dec < 0))
@@ -226,7 +226,7 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 	return dec;
 }
 
-static inline void atomic64_and(long i, atomic64_t *v)
+static inline void atomic64_and(long long i, atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "andq %1,%0"
 		     : "+m" (v->counter)
@@ -234,16 +234,16 @@ static inline void atomic64_and(long i, atomic64_t *v)
 		     : "memory");
 }
 
-static inline long atomic64_fetch_and(long i, atomic64_t *v)
+static inline long long atomic64_fetch_and(long long i, atomic64_t *v)
 {
-	long val = atomic64_read(v);
+	long long val = atomic64_read(v);
 
 	do {
 	} while (!atomic64_try_cmpxchg(v, &val, val & i));
 	return val;
 }
 
-static inline void atomic64_or(long i, atomic64_t *v)
+static inline void atomic64_or(long long i, atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "orq %1,%0"
 		     : "+m" (v->counter)
@@ -251,16 +251,16 @@ static inline void atomic64_or(long i, atomic64_t *v)
 		     : "memory");
 }
 
-static inline long atomic64_fetch_or(long i, atomic64_t *v)
+static inline long long atomic64_fetch_or(long long i, atomic64_t *v)
 {
-	long val = atomic64_read(v);
+	long long val = atomic64_read(v);
 
 	do {
 	} while (!atomic64_try_cmpxchg(v, &val, val | i));
 	return val;
 }
 
-static inline void atomic64_xor(long i, atomic64_t *v)
+static inline void atomic64_xor(long long i, atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "xorq %1,%0"
 		     : "+m" (v->counter)
@@ -268,9 +268,9 @@ static inline void atomic64_xor(long i, atomic64_t *v)
 		     : "memory");
 }
 
-static inline long atomic64_fetch_xor(long i, atomic64_t *v)
+static inline long long atomic64_fetch_xor(long long i, atomic64_t *v)
 {
-	long val = atomic64_read(v);
+	long long val = atomic64_read(v);
 
 	do {
 	} while (!atomic64_try_cmpxchg(v, &val, val ^ i));
diff --git a/include/linux/types.h b/include/linux/types.h
index 1e7bd24848fc..569fc6db1bd5 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -177,7 +177,7 @@ typedef struct {
 
 #ifdef CONFIG_64BIT
 typedef struct {
-	long counter;
+	long long counter;
 } atomic64_t;
 #endif
 
-- 
2.12.2.564.g063fe858b8-goog