Commit-ID:  0203fdc160a8c8d8651a3b79aa453ec36cfbd867
Gitweb:     https://git.kernel.org/tip/0203fdc160a8c8d8651a3b79aa453ec36cfbd867
Author:     Mark Rutland <mark.rutland@xxxxxxx>
AuthorDate: Wed, 22 May 2019 14:22:36 +0100
Committer:  Ingo Molnar <mingo@xxxxxxxxxx>
CommitDate: Mon, 3 Jun 2019 12:32:56 +0200

locking/atomic, alpha: Use s64 for atomic64

As a step towards making the atomic64 API use consistent types treewide,
let's have the alpha atomic64 implementation use s64 as the underlying
type for atomic64_t, rather than long, matching the generated headers.

As atomic64_read() depends on the generic definition of atomic64_t, this
still returns long. This will be converted in a subsequent patch.

Otherwise, there should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@xxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Cc: Ivan Kokshaysky <ink@xxxxxxxxxxxxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Matt Turner <mattst88@xxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Richard Henderson <rth@xxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Will Deacon <will.deacon@xxxxxxx>
Cc: aou@xxxxxxxxxxxxxxxxx
Cc: arnd@xxxxxxxx
Cc: bp@xxxxxxxxx
Cc: catalin.marinas@xxxxxxx
Cc: davem@xxxxxxxxxxxxx
Cc: fenghua.yu@xxxxxxxxx
Cc: heiko.carstens@xxxxxxxxxx
Cc: herbert@xxxxxxxxxxxxxxxxxxx
Cc: jhogan@xxxxxxxxxx
Cc: linux@xxxxxxxxxxxxxxx
Cc: mpe@xxxxxxxxxxxxxx
Cc: palmer@xxxxxxxxxx
Cc: paul.burton@xxxxxxxx
Cc: paulus@xxxxxxxxx
Cc: ralf@xxxxxxxxxxxxxx
Cc: tony.luck@xxxxxxxxx
Cc: vgupta@xxxxxxxxxxxx
Link: https://lkml.kernel.org/r/20190522132250.26499-5-mark.rutland@xxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
---
 arch/alpha/include/asm/atomic.h | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 150a1c5d6a2c..2144530d1428 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -93,9 +93,9 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
 }
 
 #define ATOMIC64_OP(op, asm_op)						\
-static __inline__ void atomic64_##op(long i, atomic64_t * v)		\
+static __inline__ void atomic64_##op(s64 i, atomic64_t * v)		\
 {									\
-	unsigned long temp;						\
+	s64 temp;							\
 	__asm__ __volatile__(						\
 	"1:	ldq_l %0,%1\n"						\
 	"	" #asm_op " %0,%2,%0\n"					\
@@ -109,9 +109,9 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v)	\
 }									\
 
 #define ATOMIC64_OP_RETURN(op, asm_op)					\
-static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v)	\
+static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v)	\
 {									\
-	long temp, result;						\
+	s64 temp, result;						\
 	__asm__ __volatile__(						\
 	"1:	ldq_l %0,%1\n"						\
 	"	" #asm_op " %0,%3,%2\n"					\
@@ -128,9 +128,9 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v)	\
 }
 
 #define ATOMIC64_FETCH_OP(op, asm_op)					\
-static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v)	\
+static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v)	\
 {									\
-	long temp, result;						\
+	s64 temp, result;						\
 	__asm__ __volatile__(						\
 	"1:	ldq_l %2,%1\n"						\
 	"	" #asm_op " %2,%3,%0\n"					\
@@ -246,9 +246,9 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
+static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
-	long c, new, old;
+	s64 c, new, old;
 	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l	%[old],%[mem]\n"
@@ -276,9 +276,9 @@ static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
  * The function returns the old value of *v minus 1, even if
  * the atomic variable, v, was not decremented.
  */
-static inline long atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 atomic64_dec_if_positive(atomic64_t *v)
 {
-	long old, tmp;
+	s64 old, tmp;
 	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l	%[old],%[mem]\n"
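
To make the macro change concrete, below is a rough hand-expansion of
ATOMIC64_OP(add, addq) as it reads after this patch. Only the first few
lines of the macro body appear in the hunk above; the stq_c retry tail and
the constraint list are sketched here from the surrounding
arch/alpha/include/asm/atomic.h and may differ in minor details. The
substantive change is simply that the operand and the temporary are now s64
rather than long/unsigned long, matching the prototypes in the generated
atomic headers.

static __inline__ void atomic64_add(s64 i, atomic64_t *v)
{
	s64 temp;				/* was: unsigned long temp */
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"			/* load-locked v->counter */
	"	addq %0,%2,%0\n"		/* temp += i */
	"	stq_c %0,%1\n"			/* store-conditional back */
	"	beq %0,2f\n"			/* retry if the store failed */
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}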