Diffstat (limited to 'include/linux/bitops.h')
-rw-r--r--	include/linux/bitops.h	307
1 file changed, 248 insertions(+), 59 deletions(-)
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index a3b6b82108b9..ea7898cc5903 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -1,14 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _LINUX_BITOPS_H
 #define _LINUX_BITOPS_H
+
 #include <asm/types.h>
+#include <linux/bits.h>
+#include <linux/typecheck.h>
 
-#ifdef __KERNEL__
-#define BIT(nr) (1UL << (nr))
-#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
-#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
-#define BITS_PER_BYTE 8
-#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
-#endif
+#include <uapi/linux/kernel.h>
+
+#define BITS_TO_LONGS(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
+#define BITS_TO_U64(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
+#define BITS_TO_U32(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
+#define BITS_TO_BYTES(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
+
+#define BYTES_TO_BITS(nb) ((nb) * BITS_PER_BYTE)
 
 extern unsigned int __sw_hweight8(unsigned int w);
 extern unsigned int __sw_hweight16(unsigned int w);
@@ -16,34 +21,69 @@ extern unsigned int __sw_hweight32(unsigned int w);
 extern unsigned long __sw_hweight64(__u64 w);
 
 /*
+ * Defined here because those may be needed by architecture-specific static
+ * inlines.
+ */
+
+#include <asm-generic/bitops/generic-non-atomic.h>
+
+/*
+ * Many architecture-specific non-atomic bitops contain inline asm code and due
+ * to that the compiler can't optimize them to compile-time expressions or
+ * constants. In contrary, generic_*() helpers are defined in pure C and
+ * compilers optimize them just well.
+ * Therefore, to make `unsigned long foo = 0; __set_bit(BAR, &foo)` effectively
+ * equal to `unsigned long foo = BIT(BAR)`, pick the generic C alternative when
+ * the arguments can be resolved at compile time. That expression itself is a
+ * constant and doesn't bring any functional changes to the rest of cases.
+ * The casts to `uintptr_t` are needed to mitigate `-Waddress` warnings when
+ * passing a bitmap from .bss or .data (-> `!!addr` is always true).
+ */
+#define bitop(op, nr, addr)						\
+	((__builtin_constant_p(nr) &&					\
+	  __builtin_constant_p((uintptr_t)(addr) != (uintptr_t)NULL) &&	\
+	  (uintptr_t)(addr) != (uintptr_t)NULL &&			\
+	  __builtin_constant_p(*(const unsigned long *)(addr))) ?	\
+	 const##op(nr, addr) : op(nr, addr))
+
+/*
+ * The following macros are non-atomic versions of their non-underscored
+ * counterparts.
+ */
+#define __set_bit(nr, addr) bitop(___set_bit, nr, addr)
+#define __clear_bit(nr, addr) bitop(___clear_bit, nr, addr)
+#define __change_bit(nr, addr) bitop(___change_bit, nr, addr)
+#define __test_and_set_bit(nr, addr) bitop(___test_and_set_bit, nr, addr)
+#define __test_and_clear_bit(nr, addr) bitop(___test_and_clear_bit, nr, addr)
+#define __test_and_change_bit(nr, addr) bitop(___test_and_change_bit, nr, addr)
+
+#define test_bit(nr, addr) bitop(_test_bit, nr, addr)
+#define test_bit_acquire(nr, addr) bitop(_test_bit_acquire, nr, addr)
+
+/*
  * Include this here because some architectures need generic_ffs/fls in
  * scope
  */
 #include <asm/bitops.h>
 
-#define for_each_set_bit(bit, addr, size) \
-	for ((bit) = find_first_bit((addr), (size)); \
-	     (bit) < (size); \
-	     (bit) = find_next_bit((addr), (size), (bit) + 1))
-
-/* same as for_each_set_bit() but use bit as value to start with */
-#define for_each_set_bit_from(bit, addr, size) \
-	for ((bit) = find_next_bit((addr), (size), (bit)); \
-	     (bit) < (size); \
-	     (bit) = find_next_bit((addr), (size), (bit) + 1))
+/* Check that the bitops prototypes are sane */
+#define __check_bitop_pr(name)						\
+	static_assert(__same_type(arch_##name, generic_##name) &&	\
+		      __same_type(const_##name, generic_##name) &&	\
+		      __same_type(_##name, generic_##name))
 
-#define for_each_clear_bit(bit, addr, size) \
-	for ((bit) = find_first_zero_bit((addr), (size)); \
-	     (bit) < (size); \
-	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+__check_bitop_pr(__set_bit);
+__check_bitop_pr(__clear_bit);
+__check_bitop_pr(__change_bit);
+__check_bitop_pr(__test_and_set_bit);
+__check_bitop_pr(__test_and_clear_bit);
+__check_bitop_pr(__test_and_change_bit);
+__check_bitop_pr(test_bit);
+__check_bitop_pr(test_bit_acquire);
 
-/* same as for_each_clear_bit() but use bit as value to start with */
-#define for_each_clear_bit_from(bit, addr, size) \
-	for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
-	     (bit) < (size); \
-	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+#undef __check_bitop_pr
 
-static __inline__ int get_bitmask_order(unsigned int count)
+static inline int get_bitmask_order(unsigned int count)
 {
 	int order;
 
@@ -51,19 +91,9 @@ static __inline__ int get_bitmask_order(unsigned int count)
 	return order; /* We could be slightly more clever with -1 here... */
 }
 
-static __inline__ int get_count_order(unsigned int count)
-{
-	int order;
-
-	order = fls(count) - 1;
-	if (count & (count - 1))
-		order++;
-	return order;
-}
-
-static inline unsigned long hweight_long(unsigned long w)
+static __always_inline unsigned long hweight_long(unsigned long w)
 {
-	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
+	return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w);
 }
 
 /**
@@ -73,7 +103,7 @@ static inline unsigned long hweight_long(unsigned long w)
  */
 static inline __u64 rol64(__u64 word, unsigned int shift)
 {
-	return (word << shift) | (word >> (64 - shift));
+	return (word << (shift & 63)) | (word >> ((-shift) & 63));
 }
 
 /**
@@ -83,7 +113,7 @@ static inline __u64 rol64(__u64 word, unsigned int shift)
  */
 static inline __u64 ror64(__u64 word, unsigned int shift)
 {
-	return (word >> shift) | (word << (64 - shift));
+	return (word >> (shift & 63)) | (word << ((-shift) & 63));
 }
 
 /**
@@ -93,7 +123,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
  */
 static inline __u32 rol32(__u32 word, unsigned int shift)
 {
-	return (word << shift) | (word >> (32 - shift));
+	return (word << (shift & 31)) | (word >> ((-shift) & 31));
 }
 
 /**
@@ -103,7 +133,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
  */
 static inline __u32 ror32(__u32 word, unsigned int shift)
 {
-	return (word >> shift) | (word << (32 - shift));
+	return (word >> (shift & 31)) | (word << ((-shift) & 31));
 }
 
 /**
@@ -113,7 +143,7 @@ static inline __u32 ror32(__u32 word, unsigned int shift)
  */
 static inline __u16 rol16(__u16 word, unsigned int shift)
 {
-	return (word << shift) | (word >> (16 - shift));
+	return (word << (shift & 15)) | (word >> ((-shift) & 15));
 }
 
 /**
@@ -123,7 +153,7 @@ static inline __u16 rol16(__u16 word, unsigned int shift)
  */
 static inline __u16 ror16(__u16 word, unsigned int shift)
 {
-	return (word >> shift) | (word << (16 - shift));
+	return (word >> (shift & 15)) | (word << ((-shift) & 15));
 }
 
 /**
@@ -133,7 +163,7 @@ static inline __u16 ror16(__u16 word, unsigned int shift)
  */
 static inline __u8 rol8(__u8 word, unsigned int shift)
 {
-	return (word << shift) | (word >> (8 - shift));
+	return (word << (shift & 7)) | (word >> ((-shift) & 7));
 }
 
 /**
@@ -143,36 +173,101 @@ static inline __u8 rol8(__u8 word, unsigned int shift)
  */
 static inline __u8 ror8(__u8 word, unsigned int shift)
 {
-	return (word >> shift) | (word << (8 - shift));
+	return (word >> (shift & 7)) | (word << ((-shift) & 7));
 }
 
 /**
  * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
  * @value: value to sign extend
  * @index: 0 based bit index (0<=index<32) to sign bit
+ *
+ * This is safe to use for 16- and 8-bit types as well.
  */
-static inline __s32 sign_extend32(__u32 value, int index)
+static __always_inline __s32 sign_extend32(__u32 value, int index)
 {
 	__u8 shift = 31 - index;
 	return (__s32)(value << shift) >> shift;
 }
 
-static inline unsigned fls_long(unsigned long l)
+/**
+ * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
+ * @value: value to sign extend
+ * @index: 0 based bit index (0<=index<64) to sign bit
+ */
+static __always_inline __s64 sign_extend64(__u64 value, int index)
+{
+	__u8 shift = 63 - index;
+	return (__s64)(value << shift) >> shift;
+}
+
+static inline unsigned int fls_long(unsigned long l)
 {
 	if (sizeof(l) == 4)
 		return fls(l);
 	return fls64(l);
 }
 
+static inline int get_count_order(unsigned int count)
+{
+	if (count == 0)
+		return -1;
+
+	return fls(--count);
+}
+
+/**
+ * get_count_order_long - get order after rounding @l up to power of 2
+ * @l: parameter
+ *
+ * it is same as get_count_order() but with long type parameter
+ */
+static inline int get_count_order_long(unsigned long l)
+{
+	if (l == 0UL)
+		return -1;
+	return (int)fls_long(--l);
+}
+
+/**
+ * parity8 - get the parity of an u8 value
+ * @value: the value to be examined
+ *
+ * Determine the parity of the u8 argument.
+ *
+ * Returns:
+ * 0 for even parity, 1 for odd parity
+ *
+ * Note: This function informs you about the current parity. Example to bail
+ * out when parity is odd:
+ *
+ *	if (parity8(val) == 1)
+ *		return -EBADMSG;
+ *
+ * If you need to calculate a parity bit, you need to draw the conclusion from
+ * this result yourself. Example to enforce odd parity, parity bit is bit 7:
+ *
+ *	if (parity8(val) == 0)
+ *		val ^= BIT(7);
+ */
+static inline int parity8(u8 val)
+{
+	/*
+	 * One explanation of this algorithm:
+	 * https://funloop.org/codex/problem/parity/README.html
+	 */
+	val ^= val >> 4;
+	return (0x6996 >> (val & 0xf)) & 1;
+}
+
 /**
  * __ffs64 - find first set bit in a 64 bit word
  * @word: The 64 bit word
  *
- * On 64 bit arches this is a synomyn for __ffs
+ * On 64 bit arches this is a synonym for __ffs
  * The result is not defined if no bits are set, so check that @word
  * is non-zero before calling this.
  */
-static inline unsigned long __ffs64(u64 word)
+static inline __attribute_const__ unsigned int __ffs64(u64 word)
 {
 #if BITS_PER_LONG == 32
 	if (((u32)word) == 0UL)
@@ -183,18 +278,112 @@ static inline unsigned long __ffs64(u64 word)
 	return __ffs((unsigned long)word);
 }
 
-#ifdef __KERNEL__
+/**
+ * fns - find N'th set bit in a word
+ * @word: The word to search
+ * @n: Bit to find
+ */
+static inline unsigned int fns(unsigned long word, unsigned int n)
+{
+	while (word && n--)
+		word &= word - 1;
+
+	return word ? __ffs(word) : BITS_PER_LONG;
+}
+
+/**
+ * assign_bit - Assign value to a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ * @value: the value to assign
+ */
+#define assign_bit(nr, addr, value) \
+	((value) ? set_bit((nr), (addr)) : clear_bit((nr), (addr)))
+
+#define __assign_bit(nr, addr, value) \
+	((value) ? __set_bit((nr), (addr)) : __clear_bit((nr), (addr)))
+
+/**
+ * __ptr_set_bit - Set bit in a pointer's value
+ * @nr: the bit to set
+ * @addr: the address of the pointer variable
+ *
+ * Example:
+ *	void *p = foo();
+ *	__ptr_set_bit(bit, &p);
+ */
+#define __ptr_set_bit(nr, addr)					\
+	({							\
+		typecheck_pointer(*(addr));			\
+		__set_bit(nr, (unsigned long *)(addr));		\
+	})
 
-#ifndef find_last_bit
 /**
- * find_last_bit - find the last set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
+ * __ptr_clear_bit - Clear bit in a pointer's value
+ * @nr: the bit to clear
+ * @addr: the address of the pointer variable
  *
- * Returns the bit number of the first set bit, or size.
+ * Example:
+ *	void *p = foo();
+ *	__ptr_clear_bit(bit, &p);
  */
-extern unsigned long find_last_bit(const unsigned long *addr,
-				   unsigned long size);
+#define __ptr_clear_bit(nr, addr)				\
+	({							\
+		typecheck_pointer(*(addr));			\
+		__clear_bit(nr, (unsigned long *)(addr));	\
+	})
+
+/**
+ * __ptr_test_bit - Test bit in a pointer's value
+ * @nr: the bit to test
+ * @addr: the address of the pointer variable
+ *
+ * Example:
+ *	void *p = foo();
+ *	if (__ptr_test_bit(bit, &p)) {
+ *	...
+ *	} else {
+ *	...
+ *	}
+ */
+#define __ptr_test_bit(nr, addr)				\
+	({							\
+		typecheck_pointer(*(addr));			\
+		test_bit(nr, (unsigned long *)(addr));		\
+	})
+
+#ifdef __KERNEL__
+
+#ifndef set_mask_bits
+#define set_mask_bits(ptr, mask, bits)	\
+({	\
+	const typeof(*(ptr)) mask__ = (mask), bits__ = (bits);	\
+	typeof(*(ptr)) old__, new__;	\
+	\
+	old__ = READ_ONCE(*(ptr));	\
+	do {	\
+		new__ = (old__ & ~mask__) | bits__;	\
+	} while (!try_cmpxchg(ptr, &old__, new__));	\
+	\
+	old__;	\
+})
+#endif
+
+#ifndef bit_clear_unless
+#define bit_clear_unless(ptr, clear, test)	\
+({	\
+	const typeof(*(ptr)) clear__ = (clear), test__ = (test);\
+	typeof(*(ptr)) old__, new__;	\
+	\
+	old__ = READ_ONCE(*(ptr));	\
+	do {	\
+		if (old__ & test__)	\
+			break;	\
+		new__ = old__ & ~clear__;	\
+	} while (!try_cmpxchg(ptr, &old__, new__));	\
+	\
+	!(old__ & test__);	\
+})
 #endif
 
 #endif /* __KERNEL__ */
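A standalone userspace sketch (not part of the patch) that mirrors three of the helpers added above -- rol32(), sign_extend32() and parity8() -- to show why the new shift masking avoids undefined behaviour at shift == 0 and how the 0x6996 nibble lookup yields the parity. The bodies follow the kernel versions, but the stdint types and the small test harness are assumptions made here purely for illustration:

/* Userspace sketch, not kernel code: names mirror the helpers in the diff. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Masking the shift keeps "word >> (32 - shift)" from becoming a shift by
 * 32 (undefined behaviour) when shift == 0; (-shift) & 31 equals
 * (32 - shift) & 31 for shift in 1..31 and collapses to 0 at shift == 0.
 */
static inline uint32_t rol32(uint32_t word, unsigned int shift)
{
	return (word << (shift & 31)) | (word >> ((-shift) & 31));
}

static inline int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;
	return (int32_t)(value << shift) >> shift;
}

/* 0x6996 is a 16-entry lookup table: bit i holds the parity of nibble i. */
static inline int parity8(uint8_t val)
{
	val ^= val >> 4;
	return (0x6996 >> (val & 0xf)) & 1;
}

int main(void)
{
	assert(rol32(0x80000001u, 4) == 0x00000018u);
	assert(rol32(0xdeadbeefu, 0) == 0xdeadbeefu);	/* no UB at shift == 0 */
	assert(sign_extend32(0x80, 7) == -128);		/* bit 7 as sign bit */
	assert(parity8(0x00) == 0 && parity8(0x01) == 1 && parity8(0x03) == 0);
	printf("all checks passed\n");
	return 0;
}

With the old "64 - shift" form, a rotate by 0 shifted by the full word width, which C leaves undefined; the masked form makes the rotate an identity in that case while compiling to the same single rotate instruction on common architectures.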
