From a2812e178321132811a53f7be40fe7e9bbffd9e0 Mon Sep 17 00:00:00 2001
From: Akinobu Mita
Date: Thu, 26 May 2011 16:26:06 -0700
Subject: arch: add #define for each of optimized find bitops

The style that we normally use in asm-generic is to test the macro itself
for existence, so in asm-generic, do:

#ifndef find_next_zero_bit_le
extern unsigned long find_next_zero_bit_le(const void *addr,
		unsigned long size, unsigned long offset);
#endif

and in the architectures, write

static inline unsigned long find_next_zero_bit_le(const void *addr,
		unsigned long size, unsigned long offset)
#define find_next_zero_bit_le find_next_zero_bit_le

This adds the #define for each of the optimized find bitops in the
architectures.

Suggested-by: Arnd Bergmann
Signed-off-by: Akinobu Mita
Acked-by: Hans-Christian Egtvedt
Acked-by: Russell King
Acked-by: Greg Ungerer
Cc: Martin Schwidefsky
Cc: Heiko Carstens
Acked-by: Geert Uytterhoeven
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/arm/include/asm/bitops.h | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'arch/arm/include/asm/bitops.h')

diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 6b7403fd8f54..18a024b7b676 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -326,16 +326,19 @@ static inline int find_first_zero_bit_le(const void *p, unsigned size)
 {
 	return _find_first_zero_bit_le(p, size);
 }
+#define find_first_zero_bit_le find_first_zero_bit_le
 
 static inline int find_next_zero_bit_le(const void *p, int size, int offset)
 {
 	return _find_next_zero_bit_le(p, size, offset);
 }
+#define find_next_zero_bit_le find_next_zero_bit_le
 
 static inline int find_next_bit_le(const void *p, int size, int offset)
 {
 	return _find_next_bit_le(p, size, offset);
 }
+#define find_next_bit_le find_next_bit_le
 
 /*
  * Ext2 is defined to use little-endian byte ordering.
--
cgit
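
For illustration only (not part of the patch above, and not taken from the
kernel tree): a minimal, self-contained sketch of how the self-referencing
#define lets the generic header skip its fallback declaration when an
architecture supplies an optimized version. The "arch header" and
"asm-generic header" sections below are simplified stand-ins for
asm/bitops.h and include/asm-generic/bitops/find.h.

/* --- stand-in for the arch header --- */
static inline unsigned long find_next_zero_bit_le(const void *addr,
		unsigned long size, unsigned long offset)
{
	/* placeholder body; the real arch version calls its optimized helper */
	return size;
}
#define find_next_zero_bit_le find_next_zero_bit_le

/* --- stand-in for the asm-generic header, included afterwards --- */
#ifndef find_next_zero_bit_le
/* only declared when no arch-specific version was defined above */
extern unsigned long find_next_zero_bit_le(const void *addr,
		unsigned long size, unsigned long offset);
#endif

Because the arch header defines the macro to itself, the #ifndef in the
generic header evaluates false and the extern fallback is never declared,
so callers resolve to the arch-optimized inline.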