Hi all,

On Mon, 16 Aug 2021 11:47:40 +1000 Stephen Rothwell <sfr@xxxxxxxxxxxxxxxx> wrote:
>
> Today's linux-next merge of the tip tree got a conflict in:
>
>   include/asm-generic/bitops/non-atomic.h
>
> between commit:
>
>   8f76f9c46952 ("bitops/non-atomic: make @nr unsigned to avoid any DIV")
>
> from the asm-generic tree and commit:
>
>   cf3ee3c8c29d ("locking/atomic: add generic arch_*() bitops")
>
> from the tip tree.
>
> I fixed it up (see below) and can carry the fix as necessary. This
> is now fixed as far as linux-next is concerned, but any non trivial
> conflicts should be mentioned to your upstream maintainer when your tree
> is submitted for merging. You may also want to consider cooperating
> with the maintainer of the conflicting tree to minimise any particularly
> complex conflicts.
>
> diff --cc include/asm-generic/bitops/non-atomic.h
> index c5a7d8eb9c2b,365377fb104b..000000000000
> --- a/include/asm-generic/bitops/non-atomic.h
> +++ b/include/asm-generic/bitops/non-atomic.h
> @@@ -13,15 -13,18 +13,18 @@@
>    * If it's called on the same region of memory simultaneously, the effect
>    * may be that only one operation succeeds.
>    */
> - static inline void __set_bit(unsigned int nr, volatile unsigned long *addr)
> + static __always_inline void
>  -arch___set_bit(int nr, volatile unsigned long *addr)
> ++arch___set_bit(unsigned int nr, volatile unsigned long *addr)
>   {
>   	unsigned long mask = BIT_MASK(nr);
>   	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
>
>   	*p |= mask;
>   }
> + #define __set_bit arch___set_bit
>
> - static inline void __clear_bit(unsigned int nr, volatile unsigned long *addr)
> + static __always_inline void
>  -arch___clear_bit(int nr, volatile unsigned long *addr)
> ++arch___clear_bit(unsigned int nr, volatile unsigned long *addr)
>   {
>   	unsigned long mask = BIT_MASK(nr);
>   	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
> @@@ -38,7 -42,8 +42,8 @@@
>    * If it's called on the same region of memory simultaneously, the effect
>    * may be that only one operation succeeds.
>    */
> - static inline void __change_bit(unsigned int nr, volatile unsigned long *addr)
> + static __always_inline
>  -void arch___change_bit(int nr, volatile unsigned long *addr)
> ++void arch___change_bit(unsigned int nr, volatile unsigned long *addr)
>   {
>   	unsigned long mask = BIT_MASK(nr);
>   	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
> @@@ -55,7 -61,8 +61,8 @@@
>    * If two examples of this operation race, one can appear to succeed
>    * but actually fail. You must protect multiple accesses with a lock.
>    */
> - static inline int __test_and_set_bit(unsigned int nr, volatile unsigned long *addr)
> + static __always_inline int
>  -arch___test_and_set_bit(int nr, volatile unsigned long *addr)
> ++arch___test_and_set_bit(unsigned int nr, volatile unsigned long *addr)
>   {
>   	unsigned long mask = BIT_MASK(nr);
>   	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
> @@@ -74,7 -82,8 +82,8 @@@
>    * If two examples of this operation race, one can appear to succeed
>    * but actually fail. You must protect multiple accesses with a lock.
>    */
> - static inline int __test_and_clear_bit(unsigned int nr, volatile unsigned long *addr)
> + static __always_inline int
>  -arch___test_and_clear_bit(int nr, volatile unsigned long *addr)
> ++arch___test_and_clear_bit(unsigned int nr, volatile unsigned long *addr)
>   {
>   	unsigned long mask = BIT_MASK(nr);
>   	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
> @@@ -83,10 -92,11 +92,11 @@@
>   	*p = old & ~mask;
>   	return (old & mask) != 0;
>   }
> + #define __test_and_clear_bit arch___test_and_clear_bit
>
>   /* WARNING: non atomic and it can be reordered! */
> - static inline int __test_and_change_bit(unsigned int nr,
> - 					volatile unsigned long *addr)
> + static __always_inline int
>  -arch___test_and_change_bit(int nr, volatile unsigned long *addr)
> ++arch___test_and_change_bit(unsigned int nr, volatile unsigned long *addr)
>   {
>   	unsigned long mask = BIT_MASK(nr);
>   	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
> @@@ -101,7 -112,8 +112,8 @@@
>    * @nr: bit number to test
>    * @addr: Address to start counting from
>    */
> - static inline int test_bit(unsigned int nr, const volatile unsigned long *addr)
> + static __always_inline int
>  -arch_test_bit(int nr, const volatile unsigned long *addr)
> ++arch_test_bit(unsigned int nr, const volatile unsigned long *addr)
>   {
>   	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
>   }

This is now a conflict between the asm-generic tree and Linus' tree.

-- 
Cheers,
Stephen Rothwell
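For anyone reading the resolution without both commits to hand: the tip tree commit renames the generic helpers to arch___*() and keeps the old names working via the #define lines visible in the hunks above, while the asm-generic commit changes @nr from int to unsigned int so that the divide/modulo by BITS_PER_LONG inside BIT_WORD()/BIT_MASK() can be lowered to a shift and a mask rather than a signed division. The resolution simply combines the two. Below is a minimal, stand-alone user-space sketch of that arithmetic; BITS_PER_LONG, BIT_MASK() and BIT_WORD() here are simplified local re-definitions modelled on include/linux/bits.h (assuming a 64-bit build), not the kernel headers themselves.

/*
 * Stand-alone, user-space sketch (not kernel code).  BITS_PER_LONG,
 * BIT_MASK() and BIT_WORD() are simplified local re-definitions
 * modelled on the kernel's include/linux/bits.h, assuming a 64-bit
 * build.
 */
#include <stdio.h>

#define BITS_PER_LONG	64
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)

/* Same shape as the resolved arch___set_bit(): @nr is unsigned. */
static void sketch_set_bit(unsigned int nr, unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);	/* nr % 64: an AND, then a shift */
	unsigned long *p = addr + BIT_WORD(nr);	/* nr / 64: a plain shift */

	*p |= mask;				/* non-atomic read-modify-write */
}

int main(void)
{
	unsigned long bitmap[2] = { 0, 0 };

	sketch_set_bit(70, bitmap);		/* bit 6 of word 1 */
	printf("bitmap[1] = %#lx\n", bitmap[1]);	/* prints 0x40 */
	return 0;
}

Building the sketch with something like gcc -O2 -S and then changing @nr back to a plain int is a quick way to see the extra instructions that signed division/modulo by 64 drags in, which is the point of the asm-generic commit.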