[PATCHv5] atomic: add *_dec_not_zero

Introduce an *_dec_not_zero operation and implement it as a special
case of *_add_unless. batman-adv uses atomic_dec_not_zero in several
places, for example in its re-broadcast queue and aggregation queue
management, and there are further pending patches that could use this
macro as well.
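
A sketch of the intended call pattern (the names are illustrative, not
taken from an existing user): a caller handing out a limited budget can
claim one slot only while the budget is non-zero, without a separate
read/test/decrement race:

	/* hypothetical budget counter; 128 is an arbitrary limit */
	static atomic_t queue_left = ATOMIC_INIT(128);

	if (!atomic_dec_not_zero(&queue_left))
		return -ENOSPC;	/* budget already exhausted */

	/* one slot claimed, safe to enqueue */

atomic_dec_not_zero() returns non-zero when the decrement happened and
zero when the counter was already zero, exactly like
atomic_add_unless(v, -1, 0).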

Suggested-by: David S. Miller <davem@xxxxxxxxxxxxx>
Signed-off-by: Sven Eckelmann <sven@xxxxxxxxxxxxx>
Cc: Randy Dunlap <rdunlap@xxxxxxxxxxxx>
Cc: Richard Henderson <rth@xxxxxxxxxxx>
Cc: Ivan Kokshaysky <ink@xxxxxxxxxxxxxxxxxxxx>
Cc: Matt Turner <mattst88@xxxxxxxxx>
Cc: Russell King <linux@xxxxxxxxxxxxxxxx>
Cc: Tony Luck <tony.luck@xxxxxxxxx>
Cc: Fenghua Yu <fenghua.yu@xxxxxxxxx>
Cc: Hirokazu Takata <takata@xxxxxxxxxxxxxx>
Cc: Ralf Baechle <ralf@xxxxxxxxxxxxxx>
Cc: Kyle McMartin <kyle@xxxxxxxxxxx>
Cc: Helge Deller <deller@xxxxxx>
Cc: "James E.J. Bottomley" <jejb@xxxxxxxxxxxxxxxx>
Cc: Benjamin Herrenschmidt <benh@xxxxxxxxxxxxxxxxxxx>
Cc: Paul Mackerras <paulus@xxxxxxxxx>
Cc: Martin Schwidefsky <schwidefsky@xxxxxxxxxx>
Cc: Heiko Carstens <heiko.carstens@xxxxxxxxxx>
Cc: linux390@xxxxxxxxxx
Cc: "David S. Miller" <davem@xxxxxxxxxxxxx>
Cc: Chris Metcalf <cmetcalf@xxxxxxxxxx>
Cc: Jeff Dike <jdike@xxxxxxxxxxx>
Cc: Richard Weinberger <richard@xxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: x86@xxxxxxxxxx
Cc: Arnd Bergmann <arnd@xxxxxxxx>
Cc: linux-doc@xxxxxxxxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
Cc: linux-alpha@xxxxxxxxxxxxxxx
Cc: linux-arm-kernel@xxxxxxxxxxxxxxxxxxx
Cc: linux-ia64@xxxxxxxxxxxxxxx
Cc: linux-m32r@xxxxxxxxxxxxxxxxx
Cc: linux-m32r-ja@xxxxxxxxxxxxxxxxx
Cc: linux-mips@xxxxxxxxxxxxxx
Cc: linux-parisc@xxxxxxxxxxxxxxx
Cc: linuxppc-dev@xxxxxxxxxxxxxxxx
Cc: linux-s390@xxxxxxxxxxxxxxx
Cc: sparclinux@xxxxxxxxxxxxxxx
Cc: user-mode-linux-devel@xxxxxxxxxxxxxxxxxxxxx
Cc: linux-hexagon@xxxxxxxxxxxxxxx
---
David S. Miller recommended this change in
 https://lists.open-mesh.org/pipermail/b.a.t.m.a.n/2011-May/004560.html

Arnd Bergmann wanted to apply it in message 201106172320.26476.arnd@xxxxxxxx

... and then Arun Sharma's atomic changes in
https://lkml.org/lkml/2011/6/6/430 created a big merge conflict

I don't think it is a good idea to assume that everyone still agrees
with the patch after I've rewritten it. I'm trying Andrew Morton this
time because he applied the last changes to include/linux/atomic.h.
There are already two projects (batman-adv/fault_inject) that carry
their own private, non-conflicting definition, and I have patches for
nine other modules that could use it.
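
For reference, the x86 cmpxchg8b implementation added below is the
usual compare-and-swap retry loop. A C equivalent of what the assembly
does (a sketch in terms of the existing atomic64 API, not code from
this patch) would be:

	static inline int atomic64_dec_not_zero_equiv(atomic64_t *v)
	{
		long long old, seen;

		old = atomic64_read(v);
		while (old != 0) {
			/* atomic64_cmpxchg() returns the value it found */
			seen = atomic64_cmpxchg(v, old, old - 1);
			if (seen == old)
				return 1;	/* decrement succeeded */
			old = seen;		/* lost a race, retry */
		}
		return 0;			/* counter was already zero */
	}

The assembly additionally has to split the 64-bit zero test across
%eax/%edx: %eax is checked first and %edx is only consulted when the
low word is zero.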


 Documentation/atomic_ops.txt       |    1 +
 arch/alpha/include/asm/atomic.h    |    1 +
 arch/alpha/include/asm/local.h     |    1 +
 arch/arm/include/asm/atomic.h      |    1 +
 arch/hexagon/include/asm/atomic.h  |    1 +
 arch/ia64/include/asm/atomic.h     |    1 +
 arch/m32r/include/asm/local.h      |    1 +
 arch/mips/include/asm/atomic.h     |    1 +
 arch/mips/include/asm/local.h      |    1 +
 arch/parisc/include/asm/atomic.h   |    1 +
 arch/powerpc/include/asm/atomic.h  |    1 +
 arch/powerpc/include/asm/local.h   |    1 +
 arch/s390/include/asm/atomic.h     |    1 +
 arch/sparc/include/asm/atomic_64.h |    1 +
 arch/tile/include/asm/atomic_32.h  |    1 +
 arch/tile/include/asm/atomic_64.h  |    1 +
 arch/x86/include/asm/atomic64_32.h |   11 +++++++++++
 arch/x86/include/asm/atomic64_64.h |    1 +
 arch/x86/include/asm/local.h       |    1 +
 arch/x86/lib/atomic64_32.c         |    4 ++++
 arch/x86/lib/atomic64_386_32.S     |   21 +++++++++++++++++++++
 arch/x86/lib/atomic64_cx8_32.S     |   28 ++++++++++++++++++++++++++++
 include/asm-generic/atomic-long.h  |    2 ++
 include/asm-generic/atomic64.h     |    1 +
 include/asm-generic/local.h        |    1 +
 include/asm-generic/local64.h      |    2 ++
 include/linux/atomic.h             |    9 +++++++++
 lib/atomic64_test.c                |   19 +++++++++++++++++++
 28 files changed, 116 insertions(+), 0 deletions(-)

diff --git a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt
index 3bd585b..1eec221 100644
--- a/Documentation/atomic_ops.txt
+++ b/Documentation/atomic_ops.txt
@@ -190,6 +190,7 @@ atomic_add_unless requires explicit memory barriers around the operation
 unless it fails (returns 0).
 
 atomic_inc_not_zero, equivalent to atomic_add_unless(v, 1, 0)
+atomic_dec_not_zero, equivalent to atomic_add_unless(v, -1, 0)
 
 
 If a caller requires memory barrier semantics around an atomic_t
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 640f909..09d1571 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -225,6 +225,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 }
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+#define atomic64_dec_not_zero(v) atomic64_add_unless((v), -1, 0)
 
 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
diff --git a/arch/alpha/include/asm/local.h b/arch/alpha/include/asm/local.h
index 9c94b84..51eb678 100644
--- a/arch/alpha/include/asm/local.h
+++ b/arch/alpha/include/asm/local.h
@@ -79,6 +79,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
 	c != (u);						\
 })
 #define local_inc_not_zero(l) local_add_unless((l), 1, 0)
+#define local_dec_not_zero(l) local_add_unless((l), -1, 0)
 
 #define local_add_negative(a, l) (local_add_return((a), (l)) < 0)
 
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 86976d0..80ed975 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -458,6 +458,7 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 #define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
 #define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
+#define atomic64_dec_not_zero(v)	atomic64_add_unless((v), -1LL, 0LL)
 
 #endif /* !CONFIG_GENERIC_ATOMIC64 */
 #endif
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index e220f90..3a1a33a 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -148,6 +148,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 }
 
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
 
 #define atomic_inc(v) atomic_add(1, (v))
 #define atomic_dec(v) atomic_sub(1, (v))
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 3fad89e..af6e9b2 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -122,6 +122,7 @@ static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
 }
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+#define atomic64_dec_not_zero(v) atomic64_add_unless((v), -1, 0)
 
 #define atomic_add_return(i,v)						\
 ({									\
diff --git a/arch/m32r/include/asm/local.h b/arch/m32r/include/asm/local.h
index 734bca8..d536082 100644
--- a/arch/m32r/include/asm/local.h
+++ b/arch/m32r/include/asm/local.h
@@ -272,6 +272,7 @@ static inline int local_add_unless(local_t *l, long a, long u)
 }
 
 #define local_inc_not_zero(l) local_add_unless((l), 1, 0)
+#define local_dec_not_zero(l) local_add_unless((l), -1, 0)
 
 static inline void local_clear_mask(unsigned long  mask, local_t *addr)
 {
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 1d93f81..babb043 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -697,6 +697,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 }
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+#define atomic64_dec_not_zero(v) atomic64_add_unless((v), -1, 0)
 
 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
index 94fde8d..0242256 100644
--- a/arch/mips/include/asm/local.h
+++ b/arch/mips/include/asm/local.h
@@ -137,6 +137,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
 	c != (u);						\
 })
 #define local_inc_not_zero(l) local_add_unless((l), 1, 0)
+#define local_dec_not_zero(l) local_add_unless((l), -1, 0)
 
 #define local_dec_return(l) local_sub_return(1, (l))
 #define local_inc_return(l) local_add_return(1, (l))
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 4054b31..57b7069 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -334,6 +334,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 }
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+#define atomic64_dec_not_zero(v) atomic64_add_unless((v), -1, 0)
 
 #endif /* !CONFIG_64BIT */
 
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 02e41b5..0798420 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -468,6 +468,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 }
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+#define atomic64_dec_not_zero(v) atomic64_add_unless((v), -1, 0)
 
 #endif /* __powerpc64__ */
 
diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
index b8da913..d182e34 100644
--- a/arch/powerpc/include/asm/local.h
+++ b/arch/powerpc/include/asm/local.h
@@ -134,6 +134,7 @@ static __inline__ int local_add_unless(local_t *l, long a, long u)
 }
 
 #define local_inc_not_zero(l) local_add_unless((l), 1, 0)
+#define local_dec_not_zero(l) local_add_unless((l), -1, 0)
 
 #define local_sub_and_test(a, l)	(local_sub_return((a), (l)) == 0)
 #define local_dec_and_test(l)		(local_dec_return((l)) == 0)
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 8517d2a..92e7d5d 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -325,6 +325,7 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
 #define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
 #define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
+#define atomic64_dec_not_zero(v)	atomic64_add_unless((v), -1, 0)
 
 #define smp_mb__before_atomic_dec()	smp_mb()
 #define smp_mb__after_atomic_dec()	smp_mb()
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 9f421df..94cf160 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -106,6 +106,7 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
 }
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+#define atomic64_dec_not_zero(v) atomic64_add_unless((v), -1, 0)
 
 /* Atomic operations are already serializing */
 #define smp_mb__before_atomic_dec()	barrier()
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index c03349e..9cfafb3 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -233,6 +233,7 @@ static inline void atomic64_set(atomic64_t *v, u64 n)
 #define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
 #define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
+#define atomic64_dec_not_zero(v)	atomic64_add_unless((v), -1LL, 0LL)
 
 /*
  * We need to barrier before modifying the word, since the _atomic_xxx()
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index 27fe667..9c22f50 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -141,6 +141,7 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
 #define atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)
 
 #define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
+#define atomic64_dec_not_zero(v)	atomic64_add_unless((v), -1, 0)
 
 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
 #define smp_mb__before_atomic_dec()	smp_mb()
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 24098aa..3cd4431 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -287,6 +287,17 @@ static inline int atomic64_inc_not_zero(atomic64_t *v)
 	return r;
 }
 
+static inline int atomic64_dec_not_zero(atomic64_t *v)
+{
+	int r;
+	asm volatile(ATOMIC64_ALTERNATIVE(dec_not_zero)
+		     : "=a" (r)
+		     : "S" (v)
+		     : "ecx", "edx", "memory"
+		     );
+	return r;
+}
+
 static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
 	long long r;
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 0e1cbfc..539e41b 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -216,6 +216,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
 }
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+#define atomic64_dec_not_zero(v) atomic64_add_unless((v), -1, 0)
 
 /*
  * atomic64_dec_if_positive - decrement by 1 if old value positive
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index 9cdae5d..2c8c92d 100644
--- a/arch/x86/include/asm/local.h
+++ b/arch/x86/include/asm/local.h
@@ -185,6 +185,7 @@ static inline long local_sub_return(long i, local_t *l)
 	c != (u);						\
 })
 #define local_inc_not_zero(l) local_add_unless((l), 1, 0)
+#define local_dec_not_zero(l) local_add_unless((l), -1, 0)
 
 /* On x86_32, these are no better than the atomic variants.
  * On x86-64 these are better than the atomic variants on SMP kernels
diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
index 042f682..7da05c3 100644
--- a/arch/x86/lib/atomic64_32.c
+++ b/arch/x86/lib/atomic64_32.c
@@ -24,6 +24,8 @@ long long atomic64_dec_if_positive_cx8(atomic64_t *v);
 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
 int atomic64_inc_not_zero_cx8(atomic64_t *v);
 EXPORT_SYMBOL(atomic64_inc_not_zero_cx8);
+int atomic64_dec_not_zero_cx8(atomic64_t *v);
+EXPORT_SYMBOL(atomic64_dec_not_zero_cx8);
 int atomic64_add_unless_cx8(atomic64_t *v, long long a, long long u);
 EXPORT_SYMBOL(atomic64_add_unless_cx8);
 
@@ -54,6 +56,8 @@ long long atomic64_dec_if_positive_386(atomic64_t *v);
 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
 int atomic64_inc_not_zero_386(atomic64_t *v);
 EXPORT_SYMBOL(atomic64_inc_not_zero_386);
+int atomic64_dec_not_zero_386(atomic64_t *v);
+EXPORT_SYMBOL(atomic64_dec_not_zero_386);
 int atomic64_add_unless_386(atomic64_t *v, long long a, long long u);
 EXPORT_SYMBOL(atomic64_add_unless_386);
 #endif
diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
index e8e7e0d..c78337b 100644
--- a/arch/x86/lib/atomic64_386_32.S
+++ b/arch/x86/lib/atomic64_386_32.S
@@ -181,6 +181,27 @@ ENDP
 #undef v
 
 #define v %esi
+BEGIN(dec_not_zero)
+	movl  (v), %eax
+	movl 4(v), %edx
+	testl %eax, %eax
+	je 3f
+1:
+	subl $1, %eax
+	sbbl $0, %edx
+	movl %eax,  (v)
+	movl %edx, 4(v)
+	movl $1, %eax
+2:
+	RET
+3:
+	testl %edx, %edx
+	jne 1b
+	jmp 2b
+ENDP
+#undef v
+
+#define v %esi
 BEGIN(dec_if_positive)
 	movl  (v), %eax
 	movl 4(v), %edx
diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
index 391a083..989638c 100644
--- a/arch/x86/lib/atomic64_cx8_32.S
+++ b/arch/x86/lib/atomic64_cx8_32.S
@@ -220,3 +220,31 @@ ENTRY(atomic64_inc_not_zero_cx8)
 	jmp 3b
 	CFI_ENDPROC
 ENDPROC(atomic64_inc_not_zero_cx8)
+
+ENTRY(atomic64_dec_not_zero_cx8)
+	CFI_STARTPROC
+	SAVE ebx
+
+	read64 %esi
+1:
+	testl %eax, %eax
+	je 4f
+2:
+	movl %eax, %ebx
+	movl %edx, %ecx
+	subl $1, %ebx
+	sbbl $0, %ecx
+	LOCK_PREFIX
+	cmpxchg8b (%esi)
+	jne 1b
+
+	movl $1, %eax
+3:
+	RESTORE ebx
+	ret
+4:
+	testl %edx, %edx
+	jne 2b
+	jmp 3b
+	CFI_ENDPROC
+ENDPROC(atomic64_dec_not_zero_cx8)
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index b7babf0..0fe75ab 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -130,6 +130,7 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
 }
 
 #define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
+#define atomic_long_dec_not_zero(l) atomic64_dec_not_zero((atomic64_t *)(l))
 
 #define atomic_long_cmpxchg(l, old, new) \
 	(atomic64_cmpxchg((atomic64_t *)(l), (old), (new)))
@@ -247,6 +248,7 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
 }
 
 #define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))
+#define atomic_long_dec_not_zero(l) atomic_dec_not_zero((atomic_t *)(l))
 
 #define atomic_long_cmpxchg(l, old, new) \
 	(atomic_cmpxchg((atomic_t *)(l), (old), (new)))
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index b18ce4f..90ff9b1 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -38,5 +38,6 @@ extern int	 atomic64_add_unless(atomic64_t *v, long long a, long long u);
 #define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
 #define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v) 	atomic64_add_unless((v), 1LL, 0LL)
+#define atomic64_dec_not_zero(v)	atomic64_add_unless((v), -1LL, 0LL)
 
 #endif  /*  _ASM_GENERIC_ATOMIC64_H  */
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
index 9ceb03b..fabf4f3 100644
--- a/include/asm-generic/local.h
+++ b/include/asm-generic/local.h
@@ -44,6 +44,7 @@ typedef struct
 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
+#define local_dec_not_zero(l) atomic_long_dec_not_zero(&(l)->a)
 
 /* Non-atomic variants, ie. preemption disabled and won't be touched
  * in interrupt, etc.  Some archs can optimize this case well. */
diff --git a/include/asm-generic/local64.h b/include/asm-generic/local64.h
index 5980002..76acbe2 100644
--- a/include/asm-generic/local64.h
+++ b/include/asm-generic/local64.h
@@ -45,6 +45,7 @@ typedef struct {
 #define local64_xchg(l, n)	local_xchg((&(l)->a), (n))
 #define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u))
 #define local64_inc_not_zero(l)	local_inc_not_zero(&(l)->a)
+#define local64_dec_not_zero(l)	local_dec_not_zero(&(l)->a)
 
 /* Non-atomic variants, ie. preemption disabled and won't be touched
  * in interrupt, etc.  Some archs can optimize this case well. */
@@ -83,6 +84,7 @@ typedef struct {
 #define local64_xchg(l, n)	atomic64_xchg((&(l)->a), (n))
 #define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u))
 #define local64_inc_not_zero(l)	atomic64_inc_not_zero(&(l)->a)
+#define local64_dec_not_zero(l)	atomic64_dec_not_zero(&(l)->a)
 
 /* Non-atomic variants, ie. preemption disabled and won't be touched
  * in interrupt, etc.  Some archs can optimize this case well. */
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 42b77b5..ad2b750 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -27,6 +27,15 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
 
 /**
+ * atomic_dec_not_zero - decrement unless the number is zero
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1, so long as @v is non-zero.
+ * Returns non-zero if @v was non-zero, and zero otherwise.
+ */
+#define atomic_dec_not_zero(v)		atomic_add_unless((v), -1, 0)
+
+/**
  * atomic_inc_not_zero_hint - increment if not null
  * @v: pointer of type atomic_t
  * @hint: probable value of the atomic before the increment
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 0c33cde..b7468b7 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -145,6 +145,25 @@ static __init int test_atomic64(void)
 	r += one;
 	BUG_ON(v.counter != r);
 
+	INIT(onestwos);
+	BUG_ON(!atomic64_dec_not_zero(&v));
+	r -= one;
+	BUG_ON(v.counter != r);
+
+	INIT(0);
+	BUG_ON(atomic64_dec_not_zero(&v));
+	BUG_ON(v.counter != r);
+
+	INIT(one);
+	BUG_ON(!atomic64_dec_not_zero(&v));
+	r -= one;
+	BUG_ON(v.counter != r);
+
+	INIT(-one);
+	BUG_ON(!atomic64_dec_not_zero(&v));
+	r -= one;
+	BUG_ON(v.counter != r);
+
 #ifdef CONFIG_X86
 	printk(KERN_INFO "atomic64 test passed for %s platform %s CX8 and %s SSE\n",
 #ifdef CONFIG_X86_64
-- 
1.7.7.3



