Commit-ID:  d84e28d250150adc6526dcce4ca089e2b57430f3
Gitweb:     https://git.kernel.org/tip/d84e28d250150adc6526dcce4ca089e2b57430f3
Author:     Mark Rutland <mark.rutland@xxxxxxx>
AuthorDate: Wed, 22 May 2019 14:22:40 +0100
Committer:  Ingo Molnar <mingo@xxxxxxxxxx>
CommitDate: Mon, 3 Jun 2019 12:32:56 +0200

locking/atomic, ia64: Use s64 for atomic64

As a step towards making the atomic64 API use consistent types treewide,
let's have the ia64 atomic64 implementation use s64 as the underlying type
for atomic64_t, rather than long or __s64, matching the generated headers.

As atomic64_read() depends on the generic definition of atomic64_t, this
still returns long. This will be converted in a subsequent patch.

Otherwise, there should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@xxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Cc: Fenghua Yu <fenghua.yu@xxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Tony Luck <tony.luck@xxxxxxxxx>
Cc: Will Deacon <will.deacon@xxxxxxx>
Cc: aou@xxxxxxxxxxxxxxxxx
Cc: arnd@xxxxxxxx
Cc: bp@xxxxxxxxx
Cc: catalin.marinas@xxxxxxx
Cc: davem@xxxxxxxxxxxxx
Cc: heiko.carstens@xxxxxxxxxx
Cc: herbert@xxxxxxxxxxxxxxxxxxx
Cc: ink@xxxxxxxxxxxxxxxxxxxx
Cc: jhogan@xxxxxxxxxx
Cc: linux@xxxxxxxxxxxxxxx
Cc: mattst88@xxxxxxxxx
Cc: mpe@xxxxxxxxxxxxxx
Cc: palmer@xxxxxxxxxx
Cc: paul.burton@xxxxxxxx
Cc: paulus@xxxxxxxxx
Cc: ralf@xxxxxxxxxxxxxx
Cc: rth@xxxxxxxxxxx
Cc: vgupta@xxxxxxxxxxxx
Link: https://lkml.kernel.org/r/20190522132250.26499-9-mark.rutland@xxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
---
 arch/ia64/include/asm/atomic.h | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 206530d0751b..50440f3ddc43 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -124,10 +124,10 @@ ATOMIC_FETCH_OP(xor, ^)
 #undef ATOMIC_OP
 
 #define ATOMIC64_OP(op, c_op) \
-static __inline__ long \
-ia64_atomic64_##op (__s64 i, atomic64_t *v) \
+static __inline__ s64 \
+ia64_atomic64_##op (s64 i, atomic64_t *v) \
 { \
-        __s64 old, new; \
+        s64 old, new; \
         CMPXCHG_BUGCHECK_DECL \
 \
         do { \
@@ -139,10 +139,10 @@ ia64_atomic64_##op (__s64 i, atomic64_t *v) \
 }
 
 #define ATOMIC64_FETCH_OP(op, c_op) \
-static __inline__ long \
-ia64_atomic64_fetch_##op (__s64 i, atomic64_t *v) \
+static __inline__ s64 \
+ia64_atomic64_fetch_##op (s64 i, atomic64_t *v) \
 { \
-        __s64 old, new; \
+        s64 old, new; \
         CMPXCHG_BUGCHECK_DECL \
 \
         do { \
@@ -162,7 +162,7 @@ ATOMIC64_OPS(sub, -)
 
 #define atomic64_add_return(i,v) \
 ({ \
-        long __ia64_aar_i = (i); \
+        s64 __ia64_aar_i = (i); \
         __ia64_atomic_const(i) \
                 ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
                 : ia64_atomic64_add(__ia64_aar_i, v); \
@@ -170,7 +170,7 @@ ATOMIC64_OPS(sub, -)
 
 #define atomic64_sub_return(i,v) \
 ({ \
-        long __ia64_asr_i = (i); \
+        s64 __ia64_asr_i = (i); \
         __ia64_atomic_const(i) \
                 ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
                 : ia64_atomic64_sub(__ia64_asr_i, v); \
@@ -178,7 +178,7 @@ ATOMIC64_OPS(sub, -)
 
 #define atomic64_fetch_add(i,v) \
 ({ \
-        long __ia64_aar_i = (i); \
+        s64 __ia64_aar_i = (i); \
         __ia64_atomic_const(i) \
                 ? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \
                 : ia64_atomic64_fetch_add(__ia64_aar_i, v); \
@@ -186,7 +186,7 @@ ATOMIC64_OPS(sub, -)
 
 #define atomic64_fetch_sub(i,v) \
 ({ \
-        long __ia64_asr_i = (i); \
+        s64 __ia64_asr_i = (i); \
         __ia64_atomic_const(i) \
                 ? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \
                 : ia64_atomic64_fetch_sub(__ia64_asr_i, v); \
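
Note for readers unfamiliar with the ia64 header: the macros in the diff use
__ia64_atomic_const() to select ia64's native fetchadd (ia64_fetch_and_add() /
ia64_fetchadd()) for the constant increments that instruction can encode, and
otherwise fall back to the ia64_atomic64_*() helpers, which are cmpxchg retry
loops. The following is a minimal userspace sketch of that retry-loop pattern,
assuming GCC __atomic builtins and int64_t in place of the kernel's
ia64_cmpxchg() and s64 -- an illustration, not the kernel implementation.

/*
 * Illustrative userspace sketch only -- NOT the kernel code. It mirrors the
 * shape of the ia64_atomic64_##op() cmpxchg retry loop touched by this patch.
 */
#include <stdint.h>
#include <stdbool.h>

typedef int64_t s64;                            /* stand-in for the kernel's s64 */
typedef struct { s64 counter; } atomic64_t;     /* simplified atomic64_t */

static inline s64 sketch_atomic64_add_return(s64 i, atomic64_t *v)
{
        s64 old, new;

        do {
                /* Read the current value and compute the would-be result... */
                old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
                new = old + i;
                /* ...then retry if another CPU changed the counter meanwhile. */
        } while (!__atomic_compare_exchange_n(&v->counter, &old, new,
                                              false /* strong CAS */,
                                              __ATOMIC_ACQUIRE,
                                              __ATOMIC_RELAXED));
        return new;
}

Usage would simply be "atomic64_t a = { 0 }; sketch_atomic64_add_return(4, &a);".
In the real header, increments recognized by __ia64_atomic_const() skip this
loop entirely and use the hardware fetchadd path shown in the macros above.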