From: Greg Ungerer <gerg@xxxxxxxxxxx>

The ColdFire processors have a much more limited set of addressing modes
that can be used for most instructions. A number of the atomic operations
have already been fixed to limit the addressing modes used with add and
sub instructions when building for ColdFire. But we missed a few. Fix the
remaining atomic operations to be clean for ColdFire processors.

Signed-off-by: Greg Ungerer <gerg@xxxxxxxxxxx>
---
 arch/m68k/include/asm/atomic.h |    6 +++---
 1 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 03ae3d1..307a573 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -169,18 +169,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
 	char c;
 	__asm__ __volatile__("addl %2,%1; smi %0"
 			     : "=d" (c), "+m" (*v)
-			     : "id" (i));
+			     : ASM_DI (i));
 	return c != 0;
 }
 
 static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
 {
-	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
+	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
 }
 
 static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
 {
-	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
+	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
 }
 
 static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
-- 
1.7.0.4
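
Note for readers following along: the ASM_DI macro referenced in the new
constraints is not defined in this hunk. As a rough, non-authoritative
sketch of the idea (the exact constraint strings and placement in the
header may differ), a conditional definition along these lines is what the
patch relies on:

	/*
	 * Sketch only, not part of this patch: pick a tighter inline-asm
	 * operand constraint when building for ColdFire, since the
	 * affected add/and/or forms with a memory destination cannot take
	 * an immediate source operand there.
	 */
	#ifdef CONFIG_COLDFIRE
	#define ASM_DI	"d"	/* data register only */
	#else
	#define ASM_DI	"id"	/* immediate or data register */
	#endif

On classic m68k the "id" constraint lets the compiler emit either an
immediate or a data register operand; forcing "d" on ColdFire keeps the
generated instruction within its reduced addressing modes.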