Some architectures have optimized find_*_bit_le() as static inline
functions, but their other little-endian bitops are identical to the
generic versions.  This adds an #ifdef CONFIG_GENERIC_FIND_BIT_LE
guard around the find_*_bit_le() declarations in
asm-generic/bitops/le.h so that those architectures can use this
header file too.

Signed-off-by: Akinobu Mita <akinobu.mita@xxxxxxxxx>
Cc: Arnd Bergmann <arnd@xxxxxxxx>
Cc: linux-arch@xxxxxxxxxxxxxxx
Cc: Martin Schwidefsky <schwidefsky@xxxxxxxxxx>
Cc: Heiko Carstens <heiko.carstens@xxxxxxxxxx>
Cc: linux390@xxxxxxxxxx
Cc: linux-s390@xxxxxxxxxxxxxxx
Cc: Russell King <linux@xxxxxxxxxxxxxxxx>
Cc: linux-arm-kernel@xxxxxxxxxxxxxxxxxxx
---
 include/asm-generic/bitops/le.h |    4 ++++
 1 files changed, 4 insertions(+), 0 deletions(-)

diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h
index 946a21b..bd2253e 100644
--- a/include/asm-generic/bitops/le.h
+++ b/include/asm-generic/bitops/le.h
@@ -30,6 +30,8 @@ static inline unsigned long find_first_zero_bit_le(const void *addr,
 
 #define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)
 
+#ifdef CONFIG_GENERIC_FIND_BIT_LE
+
 extern unsigned long find_next_zero_bit_le(const void *addr,
 		unsigned long size, unsigned long offset);
 extern unsigned long find_next_bit_le(const void *addr,
@@ -38,6 +40,8 @@ extern unsigned long find_next_bit_le(const void *addr,
 #define find_first_zero_bit_le(addr, size) \
 	find_next_zero_bit_le((addr), (size), 0)
 
+#endif /* CONFIG_GENERIC_FIND_BIT_LE */
+
 #else
 #error "Please fix <asm/byteorder.h>"
 #endif
-- 
1.7.4.4
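
For illustration, a big-endian architecture with its own optimized
find_*_bit_le() could then reuse this header roughly as sketched
below.  This fragment is not part of the patch: the stub bodies are
placeholders, and the point is only that such an architecture does
not select CONFIG_GENERIC_FIND_BIT_LE in its Kconfig, so the guarded
generic declarations drop out when the header is included.

/* Hypothetical <asm/bitops.h> fragment (sketch only, not from this
 * series).  The architecture supplies its own inline find_*_bit_le()
 * and still includes <asm-generic/bitops/le.h> for the rest.
 */

static inline unsigned long find_next_zero_bit_le(const void *addr,
		unsigned long size, unsigned long offset)
{
	/* Optimized arch-specific search would go here; this
	 * placeholder just returns "not found" per the find_*_bit
	 * convention of returning the size on failure.
	 */
	return size;
}

static inline unsigned long find_next_bit_le(const void *addr,
		unsigned long size, unsigned long offset)
{
	/* Placeholder: a real version would do a swizzled bit search. */
	return size;
}

#define find_first_zero_bit_le(addr, size) \
	find_next_zero_bit_le((addr), (size), 0)

/*
 * With CONFIG_GENERIC_FIND_BIT_LE unset, the extern declarations in
 * le.h are hidden by the new guard, so this include no longer
 * conflicts with the inlines above, while the other little-endian
 * bitops (which the changelog notes are identical to the generic
 * versions) are still picked up from the generic header.
 */
#include <asm-generic/bitops/le.h>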