On Sun, May 28, 2017 at 11:34 AM,  <hpa@xxxxxxxxx> wrote:
> On May 28, 2017 2:29:32 AM PDT, Dmitry Vyukov <dvyukov@xxxxxxxxxx> wrote:
>>On Sun, May 28, 2017 at 1:02 AM,  <hpa@xxxxxxxxx> wrote:
>>> On May 26, 2017 12:09:04 PM PDT, Dmitry Vyukov <dvyukov@xxxxxxxxxx> wrote:
>>>>Some 64-bit atomic operations use 'long long' as operand/return type
>>>>(e.g. asm-generic/atomic64.h, arch/x86/include/asm/atomic64_32.h);
>>>>while others use 'long' (e.g. arch/x86/include/asm/atomic64_64.h).
>>>>This makes it impossible to write portable code.
>>>>For example, there is no format specifier that prints result of
>>>>atomic64_read() without warnings. atomic64_try_cmpxchg() is almost
>>>>impossible to use in portable fashion because it requires either
>>>>'long *' or 'long long *' as argument depending on arch.
>>>>
>>>>Switch arch/x86/include/asm/atomic64_64.h to 'long long'.
>>>>
>>>>Signed-off-by: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
>>>>Cc: Mark Rutland <mark.rutland@xxxxxxx>
>>>>Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
>>>>Cc: Will Deacon <will.deacon@xxxxxxx>
>>>>Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
>>>>Cc: Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx>
>>>>Cc: Ingo Molnar <mingo@xxxxxxxxxx>
>>>>Cc: kasan-dev@xxxxxxxxxxxxxxxx
>>>>Cc: linux-mm@xxxxxxxxx
>>>>Cc: linux-kernel@xxxxxxxxxxxxxxx
>>>>Cc: x86@xxxxxxxxxx
>>>>
>>>>---
>>>>Changes since v1:
>>>> - reverted stray s/long/long long/ replace in comment
>>>> - added arch/s390 changes to fix build errors/warnings
>>>>---
[snip]
>>> NAK - this is what u64/s64 is for.
>>
>>
>>Hi,
>>
>>Patch 3 adds atomic-instrumented.h which now contains:
>>
>>+static __always_inline long long atomic64_read(const atomic64_t *v)
>>+{
>>+	return arch_atomic64_read(v);
>>+}
>>
>>without this patch that will become
>>
>>+static __always_inline s64 atomic64_read(const atomic64_t *v)
>>
>>Right?
>
> Yes.

I see that s64 is not the same as long on x86_64 (long long vs long),
so it's still not possible to e.g. portably print a result of
atomic64_read(). But it's a separate issue that we don't need to
solve now.

Also, all wrappers like:

void atomic64_set(atomic64_t *v, s64 i)
{
	arch_atomic64_set(v, i);
}

lead to type conversions, but at least my compiler does not bark on it.

The only remaining problem is with atomic64_try_cmpxchg, which is
simply not possible to use now (there is no portable way to declare
the type of *old). I will need something along the following lines to
fix it (then callers can use s64* for old). Sounds good?
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -177,7 +177,7 @@ static inline long arch_atomic64_cmpxchg(atomic64_t *v, long old, long new)
 }
 
 #define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
-static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, long *old, long new)
+static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, long new)
 {
 	return try_cmpxchg(&v->counter, old, new);
 }
@@ -198,7 +198,7 @@ static inline long arch_atomic64_xchg(atomic64_t *v, long new)
  */
 static inline bool arch_atomic64_add_unless(atomic64_t *v, long a, long u)
 {
-	long c = arch_atomic64_read(v);
+	s64 c = arch_atomic64_read(v);
 	do {
 		if (unlikely(c == u))
 			return false;
@@ -217,7 +217,7 @@ static inline bool arch_atomic64_add_unless(atomic64_t *v, long a, long u)
  */
 static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
 {
-	long dec, c = arch_atomic64_read(v);
+	s64 dec, c = arch_atomic64_read(v);
 	do {
 		dec = c - 1;
 		if (unlikely(dec < 0))
@@ -236,7 +236,7 @@ static inline void arch_atomic64_and(long i, atomic64_t *v)
 
 static inline long arch_atomic64_fetch_and(long i, atomic64_t *v)
 {
-	long val = arch_atomic64_read(v);
+	s64 val = arch_atomic64_read(v);
 
 	do {
 	} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
@@ -253,7 +253,7 @@ static inline void arch_atomic64_or(long i, atomic64_t *v)
 
 static inline long arch_atomic64_fetch_or(long i, atomic64_t *v)
 {
-	long val = arch_atomic64_read(v);
+	s64 val = arch_atomic64_read(v);
 
 	do {
 	} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
@@ -270,7 +270,7 @@ static inline void arch_atomic64_xor(long i, atomic64_t *v)
 
 static inline long arch_atomic64_fetch_xor(long i, atomic64_t *v)
 {
-	long val = arch_atomic64_read(v);
+	s64 val = arch_atomic64_read(v);
 
 	do {
 	} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index e8cf95908fe5..9e2faa85eb02 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -157,7 +157,7 @@ extern void __add_wrong_size(void)
 #define __raw_try_cmpxchg(_ptr, _pold, _new, size, lock)		\
 ({									\
 	bool success;							\
-	__typeof__(_ptr) _old = (_pold);				\
+	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
 	__typeof__(*(_ptr)) __old = *_old;				\
 	__typeof__(*(_ptr)) __new = (_new);				\
 	switch (size) {							\
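
For reference, here is a rough caller-side sketch of what that change
buys (hypothetical code, not part of the patch; the function name is
made up). Assuming the instrumented atomic64_* wrappers from patch 3
also take s64 *, 'old' can be declared as plain s64 and the same
retry loop builds without warnings on both 32-bit and 64-bit
architectures:

#include <linux/atomic.h>
#include <linux/types.h>

/*
 * Illustrative only: add 'a' to 'v' unless the result would exceed
 * 'cap'.  'old' is s64, so the declaration is portable across arches;
 * on a failed try_cmpxchg 'old' is refreshed and the cap is rechecked.
 */
static inline s64 example_atomic64_add_capped(atomic64_t *v, s64 a, s64 cap)
{
	s64 old = atomic64_read(v);

	do {
		if (old + a > cap)
			break;
	} while (!atomic64_try_cmpxchg(v, &old, old + a));

	return old;
}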