Diffstat (limited to 'lib')
53 files changed, 1448 insertions, 1001 deletions
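Among the changes below: lib/Kconfig drops the GENERIC_FIND_FIRST_BIT knob, and a lib/find_bit.c hunk further down adds a _find_first_and_bit() helper that returns the first bit set in both of two bitmaps. A minimal usage sketch, assuming the matching find_first_and_bit() wrapper in the bitops headers; the caller-side name first_common_bit() is illustrative only:

    #include <linux/bitops.h>

    /* Return the lowest bit index set in both bitmaps, or -1 if they
     * do not intersect. find_first_and_bit() returns `nbits` when no
     * common bit exists, mirroring the lib/find_bit.c hunk below.
     */
    static int first_common_bit(const unsigned long *a,
                                const unsigned long *b,
                                unsigned int nbits)
    {
            unsigned long bit = find_first_and_bit(a, b, nbits);

            return bit < nbits ? (int)bit : -1;
    }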
diff --git a/lib/Kconfig b/lib/Kconfig index 655b0e43f260..9b5a692ce00c 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -45,7 +45,6 @@ config BITREVERSE config HAVE_ARCH_BITREVERSE bool default n - depends on BITREVERSE help This option enables the use of hardware bit-reversal instructions on architectures which support such operations. @@ -65,9 +64,6 @@ config GENERIC_STRNLEN_USER config GENERIC_NET_UTILS bool -config GENERIC_FIND_FIRST_BIT - bool - source "lib/math/Kconfig" config NO_GENERIC_PCI_IOPORT_MAP @@ -122,6 +118,8 @@ config INDIRECT_IOMEM_FALLBACK mmio accesses when the IO memory address is not a registered emulated region. +source "lib/crypto/Kconfig" + config CRC_CCITT tristate "CRC-CCITT functions" help @@ -671,6 +669,10 @@ config STACKDEPOT bool select STACKTRACE +config STACKDEPOT_ALWAYS_INIT + bool + select STACKDEPOT + config STACK_HASH_ORDER int "stack depot hash size (12 => 4KB, 20 => 1024KB)" range 12 20 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index c77fe36bb3d8..a5556ab05240 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1984,6 +1984,8 @@ config KCOV bool "Code coverage for fuzzing" depends on ARCH_HAS_KCOV depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS + depends on !ARCH_WANTS_NO_INSTR || STACK_VALIDATION || \ + GCC_VERSION >= 120000 || CLANG_VERSION >= 130000 select DEBUG_FS select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC help @@ -2212,9 +2214,6 @@ config TEST_UUID config TEST_XARRAY tristate "Test the XArray code at runtime" -config TEST_OVERFLOW - tristate "Test check_*_overflow() functions at runtime" - config TEST_RHASHTABLE tristate "Perform selftest on resizable hash table" help @@ -2222,12 +2221,11 @@ config TEST_RHASHTABLE If unsure, say N. -config TEST_HASH - tristate "Perform selftest on hash functions" +config TEST_SIPHASH + tristate "Perform selftest on siphash functions" help - Enable this option to test the kernel's integer (<linux/hash.h>), - string (<linux/stringhash.h>), and siphash (<linux/siphash.h>) - hash functions on boot (or module load). + Enable this option to test the kernel's siphash (<linux/siphash.h>) hash + functions on boot (or module load). This is intended to help people writing architecture-specific optimized versions. If unsure, say N. @@ -2371,6 +2369,25 @@ config BITFIELD_KUNIT If unsure, say N. +config HASH_KUNIT_TEST + tristate "KUnit Test for integer hash functions" if !KUNIT_ALL_TESTS + depends on KUNIT + default KUNIT_ALL_TESTS + help + Enable this option to test the kernel's string (<linux/stringhash.h>), and + integer (<linux/hash.h>) hash functions on boot. + + KUnit tests run during boot and output the results to the debug log + in TAP format (https://testanything.org/). Only useful for kernel devs + running the KUnit test harness, and not intended for inclusion into a + production build. + + For more information on KUnit and unit tests in general please refer + to the KUnit documentation in Documentation/dev-tools/kunit/. + + This is intended to help people writing architecture-specific + optimized versions. If unsure, say N. + config RESOURCE_KUNIT_TEST tristate "KUnit test for resource API" depends on KUNIT @@ -2481,6 +2498,30 @@ config MEMCPY_KUNIT_TEST If unsure, say N. +config OVERFLOW_KUNIT_TEST + tristate "Test check_*_overflow() functions at runtime" if !KUNIT_ALL_TESTS + depends on KUNIT + default KUNIT_ALL_TESTS + help + Builds unit tests for the check_*_overflow(), size_*(), allocation, and + related functions. 
+ + For more information on KUnit and unit tests in general please refer + to the KUnit documentation in Documentation/dev-tools/kunit/. + + If unsure, say N. + +config STACKINIT_KUNIT_TEST + tristate "Test level of stack variable initialization" if !KUNIT_ALL_TESTS + depends on KUNIT + default KUNIT_ALL_TESTS + help + Test if the kernel is zero-initializing stack variables and + padding. Coverage is controlled by compiler flags, + CONFIG_INIT_STACK_ALL_PATTERN, CONFIG_INIT_STACK_ALL_ZERO, + CONFIG_GCC_PLUGIN_STRUCTLEAK, CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF, + or CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL. + config TEST_UDELAY tristate "udelay test driver" help @@ -2502,6 +2543,7 @@ config TEST_KMOD depends on m depends on NETDEVICES && NET_CORE && INET # for TUN depends on BLOCK + depends on PAGE_SIZE_LESS_THAN_256KB # for BTRFS select TEST_LKM select XFS_FS select TUN @@ -2571,17 +2613,6 @@ config TEST_OBJAGG Enable this option to test object aggregation manager on boot (or module load). - -config TEST_STACKINIT - tristate "Test level of stack variable initialization" - help - Test if the kernel is zero-initializing stack variables and - padding. Coverage is controlled by compiler flags, - CONFIG_GCC_PLUGIN_STRUCTLEAK, CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF, - or CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL. - - If unsure, say N. - config TEST_MEMINIT tristate "Test heap/page initialization" help diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index cdc842d090db..879757b6dd14 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan @@ -38,7 +38,7 @@ menuconfig KASAN CC_HAS_WORKING_NOSANITIZE_ADDRESS) || \ HAVE_ARCH_KASAN_HW_TAGS depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB) - select STACKDEPOT + select STACKDEPOT_ALWAYS_INIT help Enables KASAN (KernelAddressSANitizer) - runtime memory debugger, designed to find out-of-bounds accesses and use-after-free bugs. diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan index e0a93ffdef30..63b70b8c5551 100644 --- a/lib/Kconfig.kcsan +++ b/lib/Kconfig.kcsan @@ -191,6 +191,26 @@ config KCSAN_STRICT closely aligns with the rules defined by the Linux-kernel memory consistency model (LKMM). +config KCSAN_WEAK_MEMORY + bool "Enable weak memory modeling to detect missing memory barriers" + default y + depends on KCSAN_STRICT + # We can either let objtool nop __tsan_func_{entry,exit}() and builtin + # atomics instrumentation in .noinstr.text, or use a compiler that can + # implement __no_kcsan to really remove all instrumentation. + depends on STACK_VALIDATION || CC_IS_GCC || CLANG_VERSION >= 140000 + help + Enable support for modeling a subset of weak memory, which allows + detecting a subset of data races due to missing memory barriers. + + Depends on KCSAN_STRICT, because the options strengthening certain + plain accesses by default (depending on !KCSAN_STRICT) reduce the + ability to detect any data races involving reordered accesses, in + particular reordered writes. + + Weak memory modeling relies on additional instrumentation and may + affect performance. + config KCSAN_REPORT_VALUE_CHANGE_ONLY bool "Only report races where watcher observed a data value change" default y diff --git a/lib/Kconfig.kfence b/lib/Kconfig.kfence index 912f252a41fc..459dda9ef619 100644 --- a/lib/Kconfig.kfence +++ b/lib/Kconfig.kfence @@ -45,6 +45,18 @@ config KFENCE_NUM_OBJECTS pages are required; with one containing the object and two adjacent ones used as guard pages. 
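A note on the KCSAN_WEAK_MEMORY entry above: with weak memory modeling, KCSAN simulates a subset of access reordering, so races caused by a missing barrier become reportable. A contrived sketch of the bug class it targets; all names are illustrative, and the release/acquire pair is what makes the code race-free:

    #include <linux/atomic.h>
    #include <linux/errno.h>

    static int shared_data;
    static int data_ready;

    static void producer(void)
    {
            shared_data = 42;
            /* Were this a plain `data_ready = 1`, the data store could
             * be reordered past the flag store; KCSAN_WEAK_MEMORY is
             * designed to catch that kind of missing barrier. */
            smp_store_release(&data_ready, 1);
    }

    static int consumer(void)
    {
            /* Pairs with the smp_store_release() in producer(). */
            if (smp_load_acquire(&data_ready))
                    return shared_data;
            return -EAGAIN;
    }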
+config KFENCE_DEFERRABLE + bool "Use a deferrable timer to trigger allocations" + help + Use a deferrable timer to trigger allocations. This avoids forcing + CPU wake-ups if the system is idle, at the risk of a less predictable + sample interval. + + Warning: The KUnit test suite fails with this option enabled - due to + the unpredictability of the sample interval! + + Say N if you are unsure. + config KFENCE_STATIC_KEYS bool "Use static keys to set up allocations" if EXPERT depends on JUMP_LABEL diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan index e5372a13511d..236c5cefc4cc 100644 --- a/lib/Kconfig.ubsan +++ b/lib/Kconfig.ubsan @@ -112,19 +112,6 @@ config UBSAN_UNREACHABLE This option enables -fsanitize=unreachable which checks for control flow reaching an expected-to-be-unreachable position. -config UBSAN_OBJECT_SIZE - bool "Perform checking for accesses beyond the end of objects" - default UBSAN - # gcc hugely expands stack usage with -fsanitize=object-size - # https://lore.kernel.org/lkml/CAHk-=wjPasyJrDuwDnpHJS2TuQfExwe=px-SzLeN8GFMAQJPmQ@mail.gmail.com/ - depends on !CC_IS_GCC - depends on $(cc-option,-fsanitize=object-size) - help - This option enables -fsanitize=object-size which checks for accesses - beyond the end of objects where the optimizer can determine both the - object being operated on and its size, usually seen with bad downcasts, - or access to struct members from NULL pointers. - config UBSAN_BOOL bool "Perform checking for non-boolean values used as boolean" default UBSAN diff --git a/lib/Makefile b/lib/Makefile index b213a7bbf3fd..353bc09ce38d 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -61,7 +61,8 @@ obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o obj-$(CONFIG_TEST_BITOPS) += test_bitops.o CFLAGS_test_bitops.o += -Werror obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o -obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o +obj-$(CONFIG_TEST_SIPHASH) += test_siphash.o +obj-$(CONFIG_HASH_KUNIT_TEST) += test_hash.o obj-$(CONFIG_TEST_IDA) += test_ida.o obj-$(CONFIG_KASAN_KUNIT_TEST) += test_kasan.o CFLAGS_test_kasan.o += -fno-builtin @@ -76,7 +77,6 @@ obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o obj-$(CONFIG_TEST_MIN_HEAP) += test_min_heap.o obj-$(CONFIG_TEST_LKM) += test_module.o obj-$(CONFIG_TEST_VMALLOC) += test_vmalloc.o -obj-$(CONFIG_TEST_OVERFLOW) += test_overflow.o obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o obj-$(CONFIG_TEST_SORT) += test_sort.o obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o @@ -93,8 +93,6 @@ obj-$(CONFIG_TEST_KMOD) += test_kmod.o obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o -CFLAGS_test_stackinit.o += $(call cc-disable-warning, switch-unreachable) -obj-$(CONFIG_TEST_STACKINIT) += test_stackinit.o obj-$(CONFIG_TEST_BLACKHOLE_DEV) += test_blackhole_dev.o obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o obj-$(CONFIG_TEST_LOCKUP) += test_lockup.o @@ -362,6 +360,9 @@ obj-$(CONFIG_BITS_TEST) += test_bits.o obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o +obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o +CFLAGS_stackinit_kunit.o += $(call cc-disable-warning, switch-unreachable) +obj-$(CONFIG_STACKINIT_KUNIT_TEST) += stackinit_kunit.o obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o diff --git a/lib/asn1_encoder.c b/lib/asn1_encoder.c index 27bbe891714f..0fd3c454a468 100644 --- a/lib/asn1_encoder.c +++ 
b/lib/asn1_encoder.c @@ -164,8 +164,6 @@ asn1_encode_oid(unsigned char *data, const unsigned char *end_data, data_len -= 3; - ret = 0; - for (i = 2; i < oid_len; i++) { ret = asn1_encode_oid_digit(&d, &data_len, oid[i]); if (ret < 0) diff --git a/lib/atomic64.c b/lib/atomic64.c index 3df653994177..caf895789a1e 100644 --- a/lib/atomic64.c +++ b/lib/atomic64.c @@ -118,7 +118,6 @@ ATOMIC64_OPS(sub, -=) #undef ATOMIC64_OPS #define ATOMIC64_OPS(op, c_op) \ ATOMIC64_OP(op, c_op) \ - ATOMIC64_OP_RETURN(op, c_op) \ ATOMIC64_FETCH_OP(op, c_op) ATOMIC64_OPS(and, &=) @@ -127,7 +126,6 @@ ATOMIC64_OPS(xor, ^=) #undef ATOMIC64_OPS #undef ATOMIC64_FETCH_OP -#undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP s64 generic_atomic64_dec_if_positive(atomic64_t *v) diff --git a/lib/crc32.c b/lib/crc32.c index 2a68dfd3b96c..5649847d0a8d 100644 --- a/lib/crc32.c +++ b/lib/crc32.c @@ -194,13 +194,11 @@ u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len) #else u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len) { - return crc32_le_generic(crc, p, len, - (const u32 (*)[256])crc32table_le, CRC32_POLY_LE); + return crc32_le_generic(crc, p, len, crc32table_le, CRC32_POLY_LE); } u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len) { - return crc32_le_generic(crc, p, len, - (const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE); + return crc32_le_generic(crc, p, len, crc32ctable_le, CRC32C_POLY_LE); } #endif EXPORT_SYMBOL(crc32_le); @@ -208,6 +206,7 @@ EXPORT_SYMBOL(__crc32c_le); u32 __pure crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le); u32 __pure __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le); +u32 __pure crc32_be_base(u32, unsigned char const *, size_t) __alias(crc32_be); /* * This multiplies the polynomials x and y modulo the given modulus. @@ -332,15 +331,14 @@ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p, } #if CRC_BE_BITS == 1 -u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) +u32 __pure __weak crc32_be(u32 crc, unsigned char const *p, size_t len) { return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE); } #else -u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) +u32 __pure __weak crc32_be(u32 crc, unsigned char const *p, size_t len) { - return crc32_be_generic(crc, p, len, - (const u32 (*)[256])crc32table_be, CRC32_POLY_BE); + return crc32_be_generic(crc, p, len, crc32table_be, CRC32_POLY_BE); } #endif EXPORT_SYMBOL(crc32_be); diff --git a/lib/crc32test.c b/lib/crc32test.c index 61ddce2cff77..9b4af79412c4 100644 --- a/lib/crc32test.c +++ b/lib/crc32test.c @@ -675,7 +675,7 @@ static int __init crc32c_test(void) /* pre-warm the cache */ for (i = 0; i < 100; i++) { - bytes += 2*test[i].length; + bytes += test[i].length; crc ^= __crc32c_le(test[i].crc, test_buf + test[i].start, test[i].length); diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index 8620f38e117c..379a66d7f504 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -1,5 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 +menu "Crypto library routines" + config CRYPTO_LIB_AES tristate @@ -31,7 +33,7 @@ config CRYPTO_ARCH_HAVE_LIB_CHACHA config CRYPTO_LIB_CHACHA_GENERIC tristate - select CRYPTO_ALGAPI + select XOR_BLOCKS help This symbol can be depended upon by arch implementations of the ChaCha library interface that require the generic code as a @@ -40,7 +42,8 @@ config CRYPTO_LIB_CHACHA_GENERIC of CRYPTO_LIB_CHACHA. 
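The lib/crypto/Kconfig hunks here and just below make the ChaCha, Curve25519, Poly1305 and ChaCha20-Poly1305 library interfaces user-selectable tristates. As a reminder of what the 8-byte-nonce AEAD library behind CRYPTO_LIB_CHACHA20POLY1305 looks like at the call site, a minimal sketch (in-place buffers; seal_buf()/open_buf() are illustrative wrappers):

    #include <crypto/chacha20poly1305.h>

    /* Seal `len` bytes of `buf` in place; the caller must reserve
     * CHACHA20POLY1305_AUTHTAG_SIZE bytes of tail room for the tag. */
    static void seal_buf(u8 *buf, size_t len, u64 nonce,
                         const u8 key[CHACHA20POLY1305_KEY_SIZE])
    {
            chacha20poly1305_encrypt(buf, buf, len, NULL, 0, nonce, key);
    }

    /* Open `len` bytes (ciphertext plus tag) in place; returns false
     * on authentication failure. */
    static bool open_buf(u8 *buf, size_t len, u64 nonce,
                         const u8 key[CHACHA20POLY1305_KEY_SIZE])
    {
            return chacha20poly1305_decrypt(buf, buf, len, NULL, 0,
                                            nonce, key);
    }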
config CRYPTO_LIB_CHACHA - tristate + tristate "ChaCha library interface" + depends on CRYPTO depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n help @@ -65,7 +68,7 @@ config CRYPTO_LIB_CURVE25519_GENERIC of CRYPTO_LIB_CURVE25519. config CRYPTO_LIB_CURVE25519 - tristate + tristate "Curve25519 scalar multiplication library" depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519 select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n help @@ -100,7 +103,7 @@ config CRYPTO_LIB_POLY1305_GENERIC of CRYPTO_LIB_POLY1305. config CRYPTO_LIB_POLY1305 - tristate + tristate "Poly1305 library interface" depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305 select CRYPTO_LIB_POLY1305_GENERIC if CRYPTO_ARCH_HAVE_LIB_POLY1305=n help @@ -109,14 +112,21 @@ config CRYPTO_LIB_POLY1305 is available and enabled. config CRYPTO_LIB_CHACHA20POLY1305 - tristate + tristate "ChaCha20-Poly1305 AEAD support (8-byte nonce library version)" depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305 + depends on CRYPTO select CRYPTO_LIB_CHACHA select CRYPTO_LIB_POLY1305 + select CRYPTO_ALGAPI config CRYPTO_LIB_SHA256 tristate +config CRYPTO_LIB_SM3 + tristate + config CRYPTO_LIB_SM4 tristate + +endmenu diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index ed43a41f2dcc..6c872d05d1e6 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -37,6 +37,9 @@ libpoly1305-y += poly1305.o obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o libsha256-y := sha256.o +obj-$(CONFIG_CRYPTO_LIB_SM3) += libsm3.o +libsm3-y := sm3.o + obj-$(CONFIG_CRYPTO_LIB_SM4) += libsm4.o libsm4-y := sm4.o diff --git a/lib/crypto/blake2s-selftest.c b/lib/crypto/blake2s-selftest.c index 5d9ea53be973..409e4b728770 100644 --- a/lib/crypto/blake2s-selftest.c +++ b/lib/crypto/blake2s-selftest.c @@ -15,7 +15,6 @@ * #include <stdio.h> * * #include <openssl/evp.h> - * #include <openssl/hmac.h> * * #define BLAKE2S_TESTVEC_COUNT 256 * @@ -58,16 +57,6 @@ * } * printf("};\n\n"); * - * printf("static const u8 blake2s_hmac_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {\n"); - * - * HMAC(EVP_blake2s256(), key, sizeof(key), buf, sizeof(buf), hash, NULL); - * print_vec(hash, BLAKE2S_OUTBYTES); - * - * HMAC(EVP_blake2s256(), buf, sizeof(buf), key, sizeof(key), hash, NULL); - * print_vec(hash, BLAKE2S_OUTBYTES); - * - * printf("};\n"); - * * return 0; *} */ @@ -554,15 +543,6 @@ static const u8 blake2s_testvecs[][BLAKE2S_HASH_SIZE] __initconst = { 0xd6, 0x98, 0x6b, 0x07, 0x10, 0x65, 0x52, 0x65, }, }; -static const u8 blake2s_hmac_testvecs[][BLAKE2S_HASH_SIZE] __initconst = { - { 0xce, 0xe1, 0x57, 0x69, 0x82, 0xdc, 0xbf, 0x43, 0xad, 0x56, 0x4c, 0x70, - 0xed, 0x68, 0x16, 0x96, 0xcf, 0xa4, 0x73, 0xe8, 0xe8, 0xfc, 0x32, 0x79, - 0x08, 0x0a, 0x75, 0x82, 0xda, 0x3f, 0x05, 0x11, }, - { 0x77, 0x2f, 0x0c, 0x71, 0x41, 0xf4, 0x4b, 0x2b, 0xb3, 0xc6, 0xb6, 0xf9, - 0x60, 0xde, 0xe4, 0x52, 0x38, 0x66, 0xe8, 0xbf, 0x9b, 0x96, 0xc4, 0x9f, - 0x60, 0xd9, 0x24, 0x37, 0x99, 0xd6, 0xec, 0x31, }, -}; - bool __init blake2s_selftest(void) { u8 key[BLAKE2S_KEY_SIZE]; @@ -607,16 +587,5 @@ bool __init blake2s_selftest(void) } } - if (success) { - blake2s256_hmac(hash, buf, key, sizeof(buf), sizeof(key)); - success &= !memcmp(hash, blake2s_hmac_testvecs[0], BLAKE2S_HASH_SIZE); - - blake2s256_hmac(hash, key, buf, sizeof(key), sizeof(buf)); - success &= !memcmp(hash, 
blake2s_hmac_testvecs[1], BLAKE2S_HASH_SIZE); - - if (!success) - pr_err("blake2s256_hmac self-test: FAIL\n"); - } - return success; } diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c index 93f2ae051370..c71c09621c09 100644 --- a/lib/crypto/blake2s.c +++ b/lib/crypto/blake2s.c @@ -18,55 +18,18 @@ void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen) { - __blake2s_update(state, in, inlen, blake2s_compress); + __blake2s_update(state, in, inlen, false); } EXPORT_SYMBOL(blake2s_update); void blake2s_final(struct blake2s_state *state, u8 *out) { WARN_ON(IS_ENABLED(DEBUG) && !out); - __blake2s_final(state, out, blake2s_compress); + __blake2s_final(state, out, false); memzero_explicit(state, sizeof(*state)); } EXPORT_SYMBOL(blake2s_final); -void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen, - const size_t keylen) -{ - struct blake2s_state state; - u8 x_key[BLAKE2S_BLOCK_SIZE] __aligned(__alignof__(u32)) = { 0 }; - u8 i_hash[BLAKE2S_HASH_SIZE] __aligned(__alignof__(u32)); - int i; - - if (keylen > BLAKE2S_BLOCK_SIZE) { - blake2s_init(&state, BLAKE2S_HASH_SIZE); - blake2s_update(&state, key, keylen); - blake2s_final(&state, x_key); - } else - memcpy(x_key, key, keylen); - - for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i) - x_key[i] ^= 0x36; - - blake2s_init(&state, BLAKE2S_HASH_SIZE); - blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE); - blake2s_update(&state, in, inlen); - blake2s_final(&state, i_hash); - - for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i) - x_key[i] ^= 0x5c ^ 0x36; - - blake2s_init(&state, BLAKE2S_HASH_SIZE); - blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE); - blake2s_update(&state, i_hash, BLAKE2S_HASH_SIZE); - blake2s_final(&state, i_hash); - - memcpy(out, i_hash, BLAKE2S_HASH_SIZE); - memzero_explicit(x_key, BLAKE2S_BLOCK_SIZE); - memzero_explicit(i_hash, BLAKE2S_HASH_SIZE); -} -EXPORT_SYMBOL(blake2s256_hmac); - static int __init blake2s_mod_init(void) { if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) && diff --git a/lib/crypto/sm3.c b/lib/crypto/sm3.c new file mode 100644 index 000000000000..d473e358a873 --- /dev/null +++ b/lib/crypto/sm3.c @@ -0,0 +1,246 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * SM3 secure hash, as specified by OSCCA GM/T 0004-2012 SM3 and described + * at https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02 + * + * Copyright (C) 2017 ARM Limited or its affiliates. + * Copyright (C) 2017 Gilad Ben-Yossef <gilad@benyossef.com> + * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com> + */ + +#include <linux/module.h> +#include <asm/unaligned.h> +#include <crypto/sm3.h> + +static const u32 ____cacheline_aligned K[64] = { + 0x79cc4519, 0xf3988a32, 0xe7311465, 0xce6228cb, + 0x9cc45197, 0x3988a32f, 0x7311465e, 0xe6228cbc, + 0xcc451979, 0x988a32f3, 0x311465e7, 0x6228cbce, + 0xc451979c, 0x88a32f39, 0x11465e73, 0x228cbce6, + 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c, + 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce, + 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec, + 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5, + 0x7a879d8a, 0xf50f3b14, 0xea1e7629, 0xd43cec53, + 0xa879d8a7, 0x50f3b14f, 0xa1e7629e, 0x43cec53d, + 0x879d8a7a, 0x0f3b14f5, 0x1e7629ea, 0x3cec53d4, + 0x79d8a7a8, 0xf3b14f50, 0xe7629ea1, 0xcec53d43, + 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c, + 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce, + 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec, + 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5 +}; + +/* + * Transform the message X which consists of 16 32-bit-words. 
See + * GM/T 004-2012 for details. + */ +#define R(i, a, b, c, d, e, f, g, h, t, w1, w2) \ + do { \ + ss1 = rol32((rol32((a), 12) + (e) + (t)), 7); \ + ss2 = ss1 ^ rol32((a), 12); \ + d += FF ## i(a, b, c) + ss2 + ((w1) ^ (w2)); \ + h += GG ## i(e, f, g) + ss1 + (w1); \ + b = rol32((b), 9); \ + f = rol32((f), 19); \ + h = P0((h)); \ + } while (0) + +#define R1(a, b, c, d, e, f, g, h, t, w1, w2) \ + R(1, a, b, c, d, e, f, g, h, t, w1, w2) +#define R2(a, b, c, d, e, f, g, h, t, w1, w2) \ + R(2, a, b, c, d, e, f, g, h, t, w1, w2) + +#define FF1(x, y, z) (x ^ y ^ z) +#define FF2(x, y, z) ((x & y) | (x & z) | (y & z)) + +#define GG1(x, y, z) FF1(x, y, z) +#define GG2(x, y, z) ((x & y) | (~x & z)) + +/* Message expansion */ +#define P0(x) ((x) ^ rol32((x), 9) ^ rol32((x), 17)) +#define P1(x) ((x) ^ rol32((x), 15) ^ rol32((x), 23)) +#define I(i) (W[i] = get_unaligned_be32(data + i * 4)) +#define W1(i) (W[i & 0x0f]) +#define W2(i) (W[i & 0x0f] = \ + P1(W[i & 0x0f] \ + ^ W[(i-9) & 0x0f] \ + ^ rol32(W[(i-3) & 0x0f], 15)) \ + ^ rol32(W[(i-13) & 0x0f], 7) \ + ^ W[(i-6) & 0x0f]) + +static void sm3_transform(struct sm3_state *sctx, u8 const *data, u32 W[16]) +{ + u32 a, b, c, d, e, f, g, h, ss1, ss2; + + a = sctx->state[0]; + b = sctx->state[1]; + c = sctx->state[2]; + d = sctx->state[3]; + e = sctx->state[4]; + f = sctx->state[5]; + g = sctx->state[6]; + h = sctx->state[7]; + + R1(a, b, c, d, e, f, g, h, K[0], I(0), I(4)); + R1(d, a, b, c, h, e, f, g, K[1], I(1), I(5)); + R1(c, d, a, b, g, h, e, f, K[2], I(2), I(6)); + R1(b, c, d, a, f, g, h, e, K[3], I(3), I(7)); + R1(a, b, c, d, e, f, g, h, K[4], W1(4), I(8)); + R1(d, a, b, c, h, e, f, g, K[5], W1(5), I(9)); + R1(c, d, a, b, g, h, e, f, K[6], W1(6), I(10)); + R1(b, c, d, a, f, g, h, e, K[7], W1(7), I(11)); + R1(a, b, c, d, e, f, g, h, K[8], W1(8), I(12)); + R1(d, a, b, c, h, e, f, g, K[9], W1(9), I(13)); + R1(c, d, a, b, g, h, e, f, K[10], W1(10), I(14)); + R1(b, c, d, a, f, g, h, e, K[11], W1(11), I(15)); + R1(a, b, c, d, e, f, g, h, K[12], W1(12), W2(16)); + R1(d, a, b, c, h, e, f, g, K[13], W1(13), W2(17)); + R1(c, d, a, b, g, h, e, f, K[14], W1(14), W2(18)); + R1(b, c, d, a, f, g, h, e, K[15], W1(15), W2(19)); + + R2(a, b, c, d, e, f, g, h, K[16], W1(16), W2(20)); + R2(d, a, b, c, h, e, f, g, K[17], W1(17), W2(21)); + R2(c, d, a, b, g, h, e, f, K[18], W1(18), W2(22)); + R2(b, c, d, a, f, g, h, e, K[19], W1(19), W2(23)); + R2(a, b, c, d, e, f, g, h, K[20], W1(20), W2(24)); + R2(d, a, b, c, h, e, f, g, K[21], W1(21), W2(25)); + R2(c, d, a, b, g, h, e, f, K[22], W1(22), W2(26)); + R2(b, c, d, a, f, g, h, e, K[23], W1(23), W2(27)); + R2(a, b, c, d, e, f, g, h, K[24], W1(24), W2(28)); + R2(d, a, b, c, h, e, f, g, K[25], W1(25), W2(29)); + R2(c, d, a, b, g, h, e, f, K[26], W1(26), W2(30)); + R2(b, c, d, a, f, g, h, e, K[27], W1(27), W2(31)); + R2(a, b, c, d, e, f, g, h, K[28], W1(28), W2(32)); + R2(d, a, b, c, h, e, f, g, K[29], W1(29), W2(33)); + R2(c, d, a, b, g, h, e, f, K[30], W1(30), W2(34)); + R2(b, c, d, a, f, g, h, e, K[31], W1(31), W2(35)); + + R2(a, b, c, d, e, f, g, h, K[32], W1(32), W2(36)); + R2(d, a, b, c, h, e, f, g, K[33], W1(33), W2(37)); + R2(c, d, a, b, g, h, e, f, K[34], W1(34), W2(38)); + R2(b, c, d, a, f, g, h, e, K[35], W1(35), W2(39)); + R2(a, b, c, d, e, f, g, h, K[36], W1(36), W2(40)); + R2(d, a, b, c, h, e, f, g, K[37], W1(37), W2(41)); + R2(c, d, a, b, g, h, e, f, K[38], W1(38), W2(42)); + R2(b, c, d, a, f, g, h, e, K[39], W1(39), W2(43)); + R2(a, b, c, d, e, f, g, h, K[40], W1(40), W2(44)); + R2(d, a, b, c, h, e, f, g, 
K[41], W1(41), W2(45)); + R2(c, d, a, b, g, h, e, f, K[42], W1(42), W2(46)); + R2(b, c, d, a, f, g, h, e, K[43], W1(43), W2(47)); + R2(a, b, c, d, e, f, g, h, K[44], W1(44), W2(48)); + R2(d, a, b, c, h, e, f, g, K[45], W1(45), W2(49)); + R2(c, d, a, b, g, h, e, f, K[46], W1(46), W2(50)); + R2(b, c, d, a, f, g, h, e, K[47], W1(47), W2(51)); + + R2(a, b, c, d, e, f, g, h, K[48], W1(48), W2(52)); + R2(d, a, b, c, h, e, f, g, K[49], W1(49), W2(53)); + R2(c, d, a, b, g, h, e, f, K[50], W1(50), W2(54)); + R2(b, c, d, a, f, g, h, e, K[51], W1(51), W2(55)); + R2(a, b, c, d, e, f, g, h, K[52], W1(52), W2(56)); + R2(d, a, b, c, h, e, f, g, K[53], W1(53), W2(57)); + R2(c, d, a, b, g, h, e, f, K[54], W1(54), W2(58)); + R2(b, c, d, a, f, g, h, e, K[55], W1(55), W2(59)); + R2(a, b, c, d, e, f, g, h, K[56], W1(56), W2(60)); + R2(d, a, b, c, h, e, f, g, K[57], W1(57), W2(61)); + R2(c, d, a, b, g, h, e, f, K[58], W1(58), W2(62)); + R2(b, c, d, a, f, g, h, e, K[59], W1(59), W2(63)); + R2(a, b, c, d, e, f, g, h, K[60], W1(60), W2(64)); + R2(d, a, b, c, h, e, f, g, K[61], W1(61), W2(65)); + R2(c, d, a, b, g, h, e, f, K[62], W1(62), W2(66)); + R2(b, c, d, a, f, g, h, e, K[63], W1(63), W2(67)); + + sctx->state[0] ^= a; + sctx->state[1] ^= b; + sctx->state[2] ^= c; + sctx->state[3] ^= d; + sctx->state[4] ^= e; + sctx->state[5] ^= f; + sctx->state[6] ^= g; + sctx->state[7] ^= h; +} +#undef R +#undef R1 +#undef R2 +#undef I +#undef W1 +#undef W2 + +static inline void sm3_block(struct sm3_state *sctx, + u8 const *data, int blocks, u32 W[16]) +{ + while (blocks--) { + sm3_transform(sctx, data, W); + data += SM3_BLOCK_SIZE; + } +} + +void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len) +{ + unsigned int partial = sctx->count % SM3_BLOCK_SIZE; + u32 W[16]; + + sctx->count += len; + + if ((partial + len) >= SM3_BLOCK_SIZE) { + int blocks; + + if (partial) { + int p = SM3_BLOCK_SIZE - partial; + + memcpy(sctx->buffer + partial, data, p); + data += p; + len -= p; + + sm3_block(sctx, sctx->buffer, 1, W); + } + + blocks = len / SM3_BLOCK_SIZE; + len %= SM3_BLOCK_SIZE; + + if (blocks) { + sm3_block(sctx, data, blocks, W); + data += blocks * SM3_BLOCK_SIZE; + } + + memzero_explicit(W, sizeof(W)); + + partial = 0; + } + if (len) + memcpy(sctx->buffer + partial, data, len); +} +EXPORT_SYMBOL_GPL(sm3_update); + +void sm3_final(struct sm3_state *sctx, u8 *out) +{ + const int bit_offset = SM3_BLOCK_SIZE - sizeof(u64); + __be64 *bits = (__be64 *)(sctx->buffer + bit_offset); + __be32 *digest = (__be32 *)out; + unsigned int partial = sctx->count % SM3_BLOCK_SIZE; + u32 W[16]; + int i; + + sctx->buffer[partial++] = 0x80; + if (partial > bit_offset) { + memset(sctx->buffer + partial, 0, SM3_BLOCK_SIZE - partial); + partial = 0; + + sm3_block(sctx, sctx->buffer, 1, W); + } + + memset(sctx->buffer + partial, 0, bit_offset - partial); + *bits = cpu_to_be64(sctx->count << 3); + sm3_block(sctx, sctx->buffer, 1, W); + + for (i = 0; i < 8; i++) + put_unaligned_be32(sctx->state[i], digest++); + + /* Zeroize sensitive information. 
*/ + memzero_explicit(W, sizeof(W)); + memzero_explicit(sctx, sizeof(*sctx)); +} +EXPORT_SYMBOL_GPL(sm3_final); + +MODULE_DESCRIPTION("Generic SM3 library"); +MODULE_LICENSE("GPL v2"); diff --git a/lib/find_bit.c b/lib/find_bit.c index 0f8e2e369b1d..1b8e4b2a9cba 100644 --- a/lib/find_bit.c +++ b/lib/find_bit.c @@ -89,6 +89,27 @@ unsigned long _find_first_bit(const unsigned long *addr, unsigned long size) EXPORT_SYMBOL(_find_first_bit); #endif +#ifndef find_first_and_bit +/* + * Find the first set bit in two memory regions. + */ +unsigned long _find_first_and_bit(const unsigned long *addr1, + const unsigned long *addr2, + unsigned long size) +{ + unsigned long idx, val; + + for (idx = 0; idx * BITS_PER_LONG < size; idx++) { + val = addr1[idx] & addr2[idx]; + if (val) + return min(idx * BITS_PER_LONG + __ffs(val), size); + } + + return size; +} +EXPORT_SYMBOL(_find_first_and_bit); +#endif + #ifndef find_first_zero_bit /* * Find the first cleared bit in a memory region. diff --git a/lib/find_bit_benchmark.c b/lib/find_bit_benchmark.c index 5637c5711db9..db904b57d4b8 100644 --- a/lib/find_bit_benchmark.c +++ b/lib/find_bit_benchmark.c @@ -49,6 +49,25 @@ static int __init test_find_first_bit(void *bitmap, unsigned long len) return 0; } +static int __init test_find_first_and_bit(void *bitmap, const void *bitmap2, unsigned long len) +{ + static DECLARE_BITMAP(cp, BITMAP_LEN) __initdata; + unsigned long i, cnt; + ktime_t time; + + bitmap_copy(cp, bitmap, BITMAP_LEN); + + time = ktime_get(); + for (cnt = i = 0; i < len; cnt++) { + i = find_first_and_bit(cp, bitmap2, len); + __clear_bit(i, cp); + } + time = ktime_get() - time; + pr_err("find_first_and_bit: %18llu ns, %6ld iterations\n", time, cnt); + + return 0; +} + static int __init test_find_next_bit(const void *bitmap, unsigned long len) { unsigned long i, cnt; @@ -129,6 +148,7 @@ static int __init find_bit_test(void) * traverse only part of bitmap to avoid soft lockup. 
*/ test_find_first_bit(bitmap, BITMAP_LEN / 10); + test_find_first_and_bit(bitmap, bitmap2, BITMAP_LEN / 2); test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN); pr_err("\nStart testing find_bit() with sparse bitmap\n"); @@ -145,6 +165,7 @@ static int __init find_bit_test(void) test_find_next_zero_bit(bitmap, BITMAP_LEN); test_find_last_bit(bitmap, BITMAP_LEN); test_find_first_bit(bitmap, BITMAP_LEN); + test_find_first_and_bit(bitmap, bitmap2, BITMAP_LEN); test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN); /* diff --git a/lib/genalloc.c b/lib/genalloc.c index 9a57257988c7..00fc50d0a640 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -251,7 +251,7 @@ void gen_pool_destroy(struct gen_pool *pool) list_del(&chunk->next_chunk); end_bit = chunk_size(chunk) >> order; - bit = find_next_bit(chunk->bits, end_bit, 0); + bit = find_first_bit(chunk->bits, end_bit); BUG_ON(bit < end_bit); vfree(chunk); diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 66a740e6e153..6dd5330f7a99 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -69,42 +69,40 @@ #define iterate_xarray(i, n, base, len, __off, STEP) { \ __label__ __out; \ size_t __off = 0; \ - struct page *head = NULL; \ + struct folio *folio; \ loff_t start = i->xarray_start + i->iov_offset; \ - unsigned offset = start % PAGE_SIZE; \ pgoff_t index = start / PAGE_SIZE; \ - int j; \ - \ XA_STATE(xas, i->xarray, index); \ \ + len = PAGE_SIZE - offset_in_page(start); \ rcu_read_lock(); \ - xas_for_each(&xas, head, ULONG_MAX) { \ + xas_for_each(&xas, folio, ULONG_MAX) { \ unsigned left; \ - if (xas_retry(&xas, head)) \ + size_t offset; \ + if (xas_retry(&xas, folio)) \ continue; \ - if (WARN_ON(xa_is_value(head))) \ + if (WARN_ON(xa_is_value(folio))) \ break; \ - if (WARN_ON(PageHuge(head))) \ + if (WARN_ON(folio_test_hugetlb(folio))) \ break; \ - for (j = (head->index < index) ? index - head->index : 0; \ - j < thp_nr_pages(head); j++) { \ - void *kaddr = kmap_local_page(head + j); \ - base = kaddr + offset; \ - len = PAGE_SIZE - offset; \ + offset = offset_in_folio(folio, start + __off); \ + while (offset < folio_size(folio)) { \ + base = kmap_local_folio(folio, offset); \ len = min(n, len); \ left = (STEP); \ - kunmap_local(kaddr); \ + kunmap_local(base); \ len -= left; \ __off += len; \ n -= len; \ if (left || n == 0) \ goto __out; \ - offset = 0; \ + offset += len; \ + len = PAGE_SIZE; \ } \ } \ __out: \ rcu_read_unlock(); \ - i->iov_offset += __off; \ + i->iov_offset += __off; \ n = __off; \ } @@ -416,6 +414,7 @@ static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t by return 0; buf->ops = &page_cache_pipe_buf_ops; + buf->flags = 0; get_page(page); buf->page = page; buf->offset = offset; @@ -579,6 +578,7 @@ static size_t push_pipe(struct iov_iter *i, size_t size, break; buf->ops = &default_pipe_buf_ops; + buf->flags = 0; buf->page = page; buf->offset = 0; buf->len = min_t(ssize_t, left, PAGE_SIZE); diff --git a/lib/kobject.c b/lib/kobject.c index 4a56f519139d..56fa037501b5 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -65,7 +65,7 @@ void kobject_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid) */ static int populate_dir(struct kobject *kobj) { - struct kobj_type *t = get_ktype(kobj); + const struct kobj_type *t = get_ktype(kobj); struct attribute *attr; int error = 0; int i; @@ -346,7 +346,7 @@ EXPORT_SYMBOL(kobject_set_name); * to kobject_put(), not by a call to kfree directly to ensure that all of * the memory is cleaned up properly. 
*/ -void kobject_init(struct kobject *kobj, struct kobj_type *ktype) +void kobject_init(struct kobject *kobj, const struct kobj_type *ktype) { char *err_str; @@ -461,7 +461,7 @@ EXPORT_SYMBOL(kobject_add); * same type of error handling after a call to kobject_add() and kobject * lifetime rules are the same here. */ -int kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype, +int kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype, struct kobject *parent, const char *fmt, ...) { va_list args; @@ -679,7 +679,7 @@ EXPORT_SYMBOL(kobject_get_unless_zero); static void kobject_cleanup(struct kobject *kobj) { struct kobject *parent = kobj->parent; - struct kobj_type *t = get_ktype(kobj); + const struct kobj_type *t = get_ktype(kobj); const char *name = kobj->name; pr_debug("kobject: '%s' (%p): %s, parent %p\n", diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index c87d5b6a8a55..7c44b7ae4c5c 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -501,7 +501,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, } /* skip the event, if the filter returns zero. */ if (uevent_ops && uevent_ops->filter) - if (!uevent_ops->filter(kset, kobj)) { + if (!uevent_ops->filter(kobj)) { pr_debug("kobject: '%s' (%p): %s: filter function " "caused the event to drop!\n", kobject_name(kobj), kobj, __func__); @@ -510,7 +510,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, /* originating subsystem */ if (uevent_ops && uevent_ops->name) - subsystem = uevent_ops->name(kset, kobj); + subsystem = uevent_ops->name(kobj); else subsystem = kobject_name(&kset->kobj); if (!subsystem) { @@ -554,7 +554,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, /* let the kset specific function add its stuff */ if (uevent_ops && uevent_ops->uevent) { - retval = uevent_ops->uevent(kset, kobj, env); + retval = uevent_ops->uevent(kobj, env); if (retval) { pr_debug("kobject: '%s' (%p): %s: uevent() returned " "%d\n", kobject_name(kobj), kobj, diff --git a/lib/kstrtox.c b/lib/kstrtox.c index 059b8b00dc53..886510d248e5 100644 --- a/lib/kstrtox.c +++ b/lib/kstrtox.c @@ -22,6 +22,7 @@ #include "kstrtox.h" +noinline const char *_parse_integer_fixup_radix(const char *s, unsigned int *base) { if (*base == 0) { @@ -47,6 +48,7 @@ const char *_parse_integer_fixup_radix(const char *s, unsigned int *base) * * Don't you dare use this function. */ +noinline unsigned int _parse_integer_limit(const char *s, unsigned int base, unsigned long long *p, size_t max_chars) { @@ -85,6 +87,7 @@ unsigned int _parse_integer_limit(const char *s, unsigned int base, unsigned lon return rv; } +noinline unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *p) { return _parse_integer_limit(s, base, p, INT_MAX); @@ -125,6 +128,7 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res) * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. * Preferred over simple_strtoull(). Return code must be checked. */ +noinline int kstrtoull(const char *s, unsigned int base, unsigned long long *res) { if (s[0] == '+') @@ -148,6 +152,7 @@ EXPORT_SYMBOL(kstrtoull); * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. * Preferred over simple_strtoll(). Return code must be checked. 
*/ +noinline int kstrtoll(const char *s, unsigned int base, long long *res) { unsigned long long tmp; @@ -219,6 +224,7 @@ EXPORT_SYMBOL(_kstrtol); * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. * Preferred over simple_strtoul(). Return code must be checked. */ +noinline int kstrtouint(const char *s, unsigned int base, unsigned int *res) { unsigned long long tmp; @@ -249,6 +255,7 @@ EXPORT_SYMBOL(kstrtouint); * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. * Preferred over simple_strtol(). Return code must be checked. */ +noinline int kstrtoint(const char *s, unsigned int base, int *res) { long long tmp; @@ -264,6 +271,7 @@ int kstrtoint(const char *s, unsigned int base, int *res) } EXPORT_SYMBOL(kstrtoint); +noinline int kstrtou16(const char *s, unsigned int base, u16 *res) { unsigned long long tmp; @@ -279,6 +287,7 @@ int kstrtou16(const char *s, unsigned int base, u16 *res) } EXPORT_SYMBOL(kstrtou16); +noinline int kstrtos16(const char *s, unsigned int base, s16 *res) { long long tmp; @@ -294,6 +303,7 @@ int kstrtos16(const char *s, unsigned int base, s16 *res) } EXPORT_SYMBOL(kstrtos16); +noinline int kstrtou8(const char *s, unsigned int base, u8 *res) { unsigned long long tmp; @@ -309,6 +319,7 @@ int kstrtou8(const char *s, unsigned int base, u8 *res) } EXPORT_SYMBOL(kstrtou8); +noinline int kstrtos8(const char *s, unsigned int base, s8 *res) { long long tmp; @@ -333,6 +344,7 @@ EXPORT_SYMBOL(kstrtos8); * [oO][NnFf] for "on" and "off". Otherwise it will return -EINVAL. Value * pointed to by res is updated upon finding a match. */ +noinline int kstrtobool(const char *s, bool *res) { if (!s) diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c index 0dd434e40487..f7825991d576 100644 --- a/lib/kunit/try-catch.c +++ b/lib/kunit/try-catch.c @@ -17,7 +17,7 @@ void __noreturn kunit_try_catch_throw(struct kunit_try_catch *try_catch) { try_catch->try_result = -EFAULT; - complete_and_exit(try_catch->try_completion, -EFAULT); + kthread_complete_and_exit(try_catch->try_completion, -EFAULT); } EXPORT_SYMBOL_GPL(kunit_try_catch_throw); @@ -27,7 +27,7 @@ static int kunit_generic_run_threadfn_adapter(void *data) try_catch->try(try_catch->context); - complete_and_exit(try_catch->try_completion, 0); + kthread_complete_and_exit(try_catch->try_completion, 0); } static unsigned long kunit_test_timeout(void) @@ -52,7 +52,7 @@ static unsigned long kunit_test_timeout(void) * If tests timeout due to exceeding sysctl_hung_task_timeout_secs, * the task will be killed and an oops generated. */ - return 300 * MSEC_PER_SEC; /* 5 min */ + return 300 * msecs_to_jiffies(MSEC_PER_SEC); /* 5 min */ } void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context) @@ -78,6 +78,7 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context) if (time_remaining == 0) { kunit_err(test, "try timed out\n"); try_catch->try_result = -ETIMEDOUT; + kthread_stop(task_struct); } exit_code = try_catch->try_result; diff --git a/lib/list_debug.c b/lib/list_debug.c index 5d5424b51b74..9daa3fb9d1cd 100644 --- a/lib/list_debug.c +++ b/lib/list_debug.c @@ -49,11 +49,11 @@ bool __list_del_entry_valid(struct list_head *entry) "list_del corruption, %px->prev is LIST_POISON2 (%px)\n", entry, LIST_POISON2) || CHECK_DATA_CORRUPTION(prev->next != entry, - "list_del corruption. prev->next should be %px, but was %px\n", - entry, prev->next) || + "list_del corruption. prev->next should be %px, but was %px. 
(prev=%px)\n", + entry, prev->next, prev) || CHECK_DATA_CORRUPTION(next->prev != entry, - "list_del corruption. next->prev should be %px, but was %px\n", - entry, next->prev)) + "list_del corruption. next->prev should be %px, but was %px. (next=%px)\n", + entry, next->prev, next)) return false; return true; diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c index 71652e1c397c..8d24279fad05 100644 --- a/lib/locking-selftest.c +++ b/lib/locking-selftest.c @@ -26,6 +26,12 @@ #include <linux/rtmutex.h> #include <linux/local_lock.h> +#ifdef CONFIG_PREEMPT_RT +# define NON_RT(...) +#else +# define NON_RT(...) __VA_ARGS__ +#endif + /* * Change this to 1 if you want to see the failure printouts: */ @@ -139,7 +145,7 @@ static DEFINE_RT_MUTEX(rtmutex_Z2); #endif -static local_lock_t local_A = INIT_LOCAL_LOCK(local_A); +static DEFINE_PER_CPU(local_lock_t, local_A); /* * non-inlined runtime initializers, to let separate locks share @@ -712,12 +718,18 @@ GENERATE_TESTCASE(ABCDBCDA_rtmutex); #undef E +#ifdef CONFIG_PREEMPT_RT +# define RT_PREPARE_DBL_UNLOCK() { migrate_disable(); rcu_read_lock(); } +#else +# define RT_PREPARE_DBL_UNLOCK() +#endif /* * Double unlock: */ #define E() \ \ LOCK(A); \ + RT_PREPARE_DBL_UNLOCK(); \ UNLOCK(A); \ UNLOCK(A); /* fail */ @@ -802,6 +814,7 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock) #include "locking-selftest-wlock-hardirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_wlock) +#ifndef CONFIG_PREEMPT_RT #include "locking-selftest-spin-softirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_spin) @@ -810,10 +823,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock) #include "locking-selftest-wlock-softirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock) +#endif #undef E1 #undef E2 +#ifndef CONFIG_PREEMPT_RT /* * Enabling hardirqs with a softirq-safe lock held: */ @@ -846,6 +861,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock) #undef E1 #undef E2 +#endif + /* * Enabling irqs with an irq-safe lock held: */ @@ -875,6 +892,7 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock) #include "locking-selftest-wlock-hardirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_wlock) +#ifndef CONFIG_PREEMPT_RT #include "locking-selftest-spin-softirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_spin) @@ -883,6 +901,7 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock) #include "locking-selftest-wlock-softirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock) +#endif #undef E1 #undef E2 @@ -921,6 +940,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock) #include "locking-selftest-wlock-hardirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_wlock) +#ifndef CONFIG_PREEMPT_RT #include "locking-selftest-spin-softirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_spin) @@ -929,6 +949,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock) #include "locking-selftest-wlock-softirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock) +#endif #undef E1 #undef E2 @@ -969,6 +990,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock) #include "locking-selftest-wlock-hardirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_wlock) +#ifndef CONFIG_PREEMPT_RT #include "locking-selftest-spin-softirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_spin) @@ -977,6 +999,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock) #include "locking-selftest-wlock-softirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock) +#endif #undef E1 #undef E2 @@ -1031,6 +1054,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_rlock) #include 
"locking-selftest-wlock-hardirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_wlock) +#ifndef CONFIG_PREEMPT_RT #include "locking-selftest-spin-softirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_spin) @@ -1039,6 +1063,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_rlock) #include "locking-selftest-wlock-softirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock) +#endif #undef E1 #undef E2 @@ -1206,12 +1231,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard_rlock) #include "locking-selftest-wlock.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard_wlock) +#ifndef CONFIG_PREEMPT_RT #include "locking-selftest-softirq.h" #include "locking-selftest-rlock.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_rlock) #include "locking-selftest-wlock.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_wlock) +#endif #undef E1 #undef E2 @@ -1252,12 +1279,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard_rlock) #include "locking-selftest-wlock.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard_wlock) +#ifndef CONFIG_PREEMPT_RT #include "locking-selftest-softirq.h" #include "locking-selftest-rlock.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft_rlock) #include "locking-selftest-wlock.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft_wlock) +#endif #undef E1 #undef E2 @@ -1306,12 +1335,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_hard_rlock) #include "locking-selftest-wlock.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_hard_wlock) +#ifndef CONFIG_PREEMPT_RT #include "locking-selftest-softirq.h" #include "locking-selftest-rlock.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_rlock) #include "locking-selftest-wlock.h" GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock) +#endif #ifdef CONFIG_DEBUG_LOCK_ALLOC # define I_SPINLOCK(x) lockdep_reset_lock(&lock_##x.dep_map) @@ -1320,7 +1351,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock) # define I_MUTEX(x) lockdep_reset_lock(&mutex_##x.dep_map) # define I_RWSEM(x) lockdep_reset_lock(&rwsem_##x.dep_map) # define I_WW(x) lockdep_reset_lock(&x.dep_map) -# define I_LOCAL_LOCK(x) lockdep_reset_lock(&local_##x.dep_map) +# define I_LOCAL_LOCK(x) lockdep_reset_lock(this_cpu_ptr(&local_##x.dep_map)) #ifdef CONFIG_RT_MUTEXES # define I_RTMUTEX(x) lockdep_reset_lock(&rtmutex_##x.dep_map) #endif @@ -1380,7 +1411,7 @@ static void reset_locks(void) init_shared_classes(); raw_spin_lock_init(&raw_lock_A); raw_spin_lock_init(&raw_lock_B); - local_lock_init(&local_A); + local_lock_init(this_cpu_ptr(&local_A)); ww_mutex_init(&o, &ww_lockdep); ww_mutex_init(&o2, &ww_lockdep); ww_mutex_init(&o3, &ww_lockdep); memset(&t, 0, sizeof(t)); memset(&t2, 0, sizeof(t2)); @@ -1398,7 +1429,13 @@ static int unexpected_testcase_failures; static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask) { - unsigned long saved_preempt_count = preempt_count(); + int saved_preempt_count = preempt_count(); +#ifdef CONFIG_PREEMPT_RT +#ifdef CONFIG_SMP + int saved_mgd_count = current->migration_disabled; +#endif + int saved_rcu_count = current->rcu_read_lock_nesting; +#endif WARN_ON(irqs_disabled()); @@ -1432,6 +1469,18 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask) * count, so restore it: */ preempt_count_set(saved_preempt_count); + +#ifdef CONFIG_PREEMPT_RT +#ifdef CONFIG_SMP + while (current->migration_disabled > saved_mgd_count) + migrate_enable(); +#endif + + while 
(current->rcu_read_lock_nesting > saved_rcu_count) + rcu_read_unlock(); + WARN_ON_ONCE(current->rcu_read_lock_nesting < saved_rcu_count); +#endif + #ifdef CONFIG_TRACE_IRQFLAGS if (softirq_count()) current->softirqs_enabled = 0; @@ -1499,7 +1548,7 @@ static inline void print_testname(const char *testname) #define DO_TESTCASE_2x2RW(desc, name, nr) \ DO_TESTCASE_2RW("hard-"desc, name##_hard, nr) \ - DO_TESTCASE_2RW("soft-"desc, name##_soft, nr) \ + NON_RT(DO_TESTCASE_2RW("soft-"desc, name##_soft, nr)) \ #define DO_TESTCASE_6x2x2RW(desc, name) \ DO_TESTCASE_2x2RW(desc, name, 123); \ @@ -1547,19 +1596,19 @@ static inline void print_testname(const char *testname) #define DO_TESTCASE_2I(desc, name, nr) \ DO_TESTCASE_1("hard-"desc, name##_hard, nr); \ - DO_TESTCASE_1("soft-"desc, name##_soft, nr); + NON_RT(DO_TESTCASE_1("soft-"desc, name##_soft, nr)); #define DO_TESTCASE_2IB(desc, name, nr) \ DO_TESTCASE_1B("hard-"desc, name##_hard, nr); \ - DO_TESTCASE_1B("soft-"desc, name##_soft, nr); + NON_RT(DO_TESTCASE_1B("soft-"desc, name##_soft, nr)); #define DO_TESTCASE_6I(desc, name, nr) \ DO_TESTCASE_3("hard-"desc, name##_hard, nr); \ - DO_TESTCASE_3("soft-"desc, name##_soft, nr); + NON_RT(DO_TESTCASE_3("soft-"desc, name##_soft, nr)); #define DO_TESTCASE_6IRW(desc, name, nr) \ DO_TESTCASE_3RW("hard-"desc, name##_hard, nr); \ - DO_TESTCASE_3RW("soft-"desc, name##_soft, nr); + NON_RT(DO_TESTCASE_3RW("soft-"desc, name##_soft, nr)); #define DO_TESTCASE_2x3(desc, name) \ DO_TESTCASE_3(desc, name, 12); \ @@ -1651,6 +1700,22 @@ static void ww_test_fail_acquire(void) #endif } +#ifdef CONFIG_PREEMPT_RT +#define ww_mutex_base_lock(b) rt_mutex_lock(b) +#define ww_mutex_base_trylock(b) rt_mutex_trylock(b) +#define ww_mutex_base_lock_nest_lock(b, b2) rt_mutex_lock_nest_lock(b, b2) +#define ww_mutex_base_lock_interruptible(b) rt_mutex_lock_interruptible(b) +#define ww_mutex_base_lock_killable(b) rt_mutex_lock_killable(b) +#define ww_mutex_base_unlock(b) rt_mutex_unlock(b) +#else +#define ww_mutex_base_lock(b) mutex_lock(b) +#define ww_mutex_base_trylock(b) mutex_trylock(b) +#define ww_mutex_base_lock_nest_lock(b, b2) mutex_lock_nest_lock(b, b2) +#define ww_mutex_base_lock_interruptible(b) mutex_lock_interruptible(b) +#define ww_mutex_base_lock_killable(b) mutex_lock_killable(b) +#define ww_mutex_base_unlock(b) mutex_unlock(b) +#endif + static void ww_test_normal(void) { int ret; @@ -1665,50 +1730,50 @@ static void ww_test_normal(void) /* mutex_lock (and indirectly, mutex_lock_nested) */ o.ctx = (void *)~0UL; - mutex_lock(&o.base); - mutex_unlock(&o.base); + ww_mutex_base_lock(&o.base); + ww_mutex_base_unlock(&o.base); WARN_ON(o.ctx != (void *)~0UL); /* mutex_lock_interruptible (and *_nested) */ o.ctx = (void *)~0UL; - ret = mutex_lock_interruptible(&o.base); + ret = ww_mutex_base_lock_interruptible(&o.base); if (!ret) - mutex_unlock(&o.base); + ww_mutex_base_unlock(&o.base); else WARN_ON(1); WARN_ON(o.ctx != (void *)~0UL); /* mutex_lock_killable (and *_nested) */ o.ctx = (void *)~0UL; - ret = mutex_lock_killable(&o.base); + ret = ww_mutex_base_lock_killable(&o.base); if (!ret) - mutex_unlock(&o.base); + ww_mutex_base_unlock(&o.base); else WARN_ON(1); WARN_ON(o.ctx != (void *)~0UL); /* trylock, succeeding */ o.ctx = (void *)~0UL; - ret = mutex_trylock(&o.base); + ret = ww_mutex_base_trylock(&o.base); WARN_ON(!ret); if (ret) - mutex_unlock(&o.base); + ww_mutex_base_unlock(&o.base); else WARN_ON(1); WARN_ON(o.ctx != (void *)~0UL); /* trylock, failing */ o.ctx = (void *)~0UL; - mutex_lock(&o.base); - ret = 
mutex_trylock(&o.base); + ww_mutex_base_lock(&o.base); + ret = ww_mutex_base_trylock(&o.base); WARN_ON(ret); - mutex_unlock(&o.base); + ww_mutex_base_unlock(&o.base); WARN_ON(o.ctx != (void *)~0UL); /* nest_lock */ o.ctx = (void *)~0UL; - mutex_lock_nest_lock(&o.base, &t); - mutex_unlock(&o.base); + ww_mutex_base_lock_nest_lock(&o.base, &t); + ww_mutex_base_unlock(&o.base); WARN_ON(o.ctx != (void *)~0UL); } @@ -1721,7 +1786,7 @@ static void ww_test_two_contexts(void) static void ww_test_diff_class(void) { WWAI(&t); -#ifdef CONFIG_DEBUG_MUTEXES +#ifdef DEBUG_WW_MUTEXES t.ww_class = NULL; #endif WWL(&o, &t); @@ -1785,7 +1850,7 @@ static void ww_test_edeadlk_normal(void) { int ret; - mutex_lock(&o2.base); + ww_mutex_base_lock(&o2.base); o2.ctx = &t2; mutex_release(&o2.base.dep_map, _THIS_IP_); @@ -1801,7 +1866,7 @@ static void ww_test_edeadlk_normal(void) o2.ctx = NULL; mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_); - mutex_unlock(&o2.base); + ww_mutex_base_unlock(&o2.base); WWU(&o); WWL(&o2, &t); @@ -1811,7 +1876,7 @@ static void ww_test_edeadlk_normal_slow(void) { int ret; - mutex_lock(&o2.base); + ww_mutex_base_lock(&o2.base); mutex_release(&o2.base.dep_map, _THIS_IP_); o2.ctx = &t2; @@ -1827,7 +1892,7 @@ static void ww_test_edeadlk_normal_slow(void) o2.ctx = NULL; mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_); - mutex_unlock(&o2.base); + ww_mutex_base_unlock(&o2.base); WWU(&o); ww_mutex_lock_slow(&o2, &t); @@ -1837,7 +1902,7 @@ static void ww_test_edeadlk_no_unlock(void) { int ret; - mutex_lock(&o2.base); + ww_mutex_base_lock(&o2.base); o2.ctx = &t2; mutex_release(&o2.base.dep_map, _THIS_IP_); @@ -1853,7 +1918,7 @@ static void ww_test_edeadlk_no_unlock(void) o2.ctx = NULL; mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_); - mutex_unlock(&o2.base); + ww_mutex_base_unlock(&o2.base); WWL(&o2, &t); } @@ -1862,7 +1927,7 @@ static void ww_test_edeadlk_no_unlock_slow(void) { int ret; - mutex_lock(&o2.base); + ww_mutex_base_lock(&o2.base); mutex_release(&o2.base.dep_map, _THIS_IP_); o2.ctx = &t2; @@ -1878,7 +1943,7 @@ static void ww_test_edeadlk_no_unlock_slow(void) o2.ctx = NULL; mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_); - mutex_unlock(&o2.base); + ww_mutex_base_unlock(&o2.base); ww_mutex_lock_slow(&o2, &t); } @@ -1887,7 +1952,7 @@ static void ww_test_edeadlk_acquire_more(void) { int ret; - mutex_lock(&o2.base); + ww_mutex_base_lock(&o2.base); mutex_release(&o2.base.dep_map, _THIS_IP_); o2.ctx = &t2; @@ -1908,7 +1973,7 @@ static void ww_test_edeadlk_acquire_more_slow(void) { int ret; - mutex_lock(&o2.base); + ww_mutex_base_lock(&o2.base); mutex_release(&o2.base.dep_map, _THIS_IP_); o2.ctx = &t2; @@ -1929,11 +1994,11 @@ static void ww_test_edeadlk_acquire_more_edeadlk(void) { int ret; - mutex_lock(&o2.base); + ww_mutex_base_lock(&o2.base); mutex_release(&o2.base.dep_map, _THIS_IP_); o2.ctx = &t2; - mutex_lock(&o3.base); + ww_mutex_base_lock(&o3.base); mutex_release(&o3.base.dep_map, _THIS_IP_); o3.ctx = &t2; @@ -1955,11 +2020,11 @@ static void ww_test_edeadlk_acquire_more_edeadlk_slow(void) { int ret; - mutex_lock(&o2.base); + ww_mutex_base_lock(&o2.base); mutex_release(&o2.base.dep_map, _THIS_IP_); o2.ctx = &t2; - mutex_lock(&o3.base); + ww_mutex_base_lock(&o3.base); mutex_release(&o3.base.dep_map, _THIS_IP_); o3.ctx = &t2; @@ -1980,7 +2045,7 @@ static void ww_test_edeadlk_acquire_wrong(void) { int ret; - mutex_lock(&o2.base); + ww_mutex_base_lock(&o2.base); mutex_release(&o2.base.dep_map, _THIS_IP_); o2.ctx = &t2; @@ -2005,7 +2070,7 @@ static void 
ww_test_edeadlk_acquire_wrong_slow(void) { int ret; - mutex_lock(&o2.base); + ww_mutex_base_lock(&o2.base); mutex_release(&o2.base.dep_map, _THIS_IP_); o2.ctx = &t2; @@ -2646,8 +2711,8 @@ static void wait_context_tests(void) static void local_lock_2(void) { - local_lock_acquire(&local_A); /* IRQ-ON */ - local_lock_release(&local_A); + local_lock(&local_A); /* IRQ-ON */ + local_unlock(&local_A); HARDIRQ_ENTER(); spin_lock(&lock_A); /* IN-IRQ */ @@ -2656,18 +2721,18 @@ static void local_lock_2(void) HARDIRQ_DISABLE(); spin_lock(&lock_A); - local_lock_acquire(&local_A); /* IN-IRQ <-> IRQ-ON cycle, false */ - local_lock_release(&local_A); + local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle, false */ + local_unlock(&local_A); spin_unlock(&lock_A); HARDIRQ_ENABLE(); } static void local_lock_3A(void) { - local_lock_acquire(&local_A); /* IRQ-ON */ + local_lock(&local_A); /* IRQ-ON */ spin_lock(&lock_B); /* IRQ-ON */ spin_unlock(&lock_B); - local_lock_release(&local_A); + local_unlock(&local_A); HARDIRQ_ENTER(); spin_lock(&lock_A); /* IN-IRQ */ @@ -2676,18 +2741,18 @@ static void local_lock_3A(void) HARDIRQ_DISABLE(); spin_lock(&lock_A); - local_lock_acquire(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */ - local_lock_release(&local_A); + local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */ + local_unlock(&local_A); spin_unlock(&lock_A); HARDIRQ_ENABLE(); } static void local_lock_3B(void) { - local_lock_acquire(&local_A); /* IRQ-ON */ + local_lock(&local_A); /* IRQ-ON */ spin_lock(&lock_B); /* IRQ-ON */ spin_unlock(&lock_B); - local_lock_release(&local_A); + local_unlock(&local_A); HARDIRQ_ENTER(); spin_lock(&lock_A); /* IN-IRQ */ @@ -2696,8 +2761,8 @@ static void local_lock_3B(void) HARDIRQ_DISABLE(); spin_lock(&lock_A); - local_lock_acquire(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */ - local_lock_release(&local_A); + local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */ + local_unlock(&local_A); spin_unlock(&lock_A); HARDIRQ_ENABLE(); @@ -2812,7 +2877,7 @@ void locking_selftest(void) printk("------------------------\n"); printk("| Locking API testsuite:\n"); printk("----------------------------------------------------------------------------\n"); - printk(" | spin |wlock |rlock |mutex | wsem | rsem |\n"); + printk(" | spin |wlock |rlock |mutex | wsem | rsem |rtmutex\n"); printk(" --------------------------------------------------------------------------\n"); init_shared_classes(); @@ -2885,12 +2950,11 @@ void locking_selftest(void) DO_TESTCASE_6x1RR("rlock W1R2/R2R3/W3W1", W1R2_R2R3_W3W1); printk(" --------------------------------------------------------------------------\n"); - /* * irq-context testcases: */ DO_TESTCASE_2x6("irqs-on + irq-safe-A", irqsafe1); - DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A); + NON_RT(DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A)); DO_TESTCASE_2x6("safe-A + irqs-on", irqsafe2B); DO_TESTCASE_6x6("safe-A + unsafe-B #1", irqsafe3); DO_TESTCASE_6x6("safe-A + unsafe-B #2", irqsafe4); diff --git a/lib/logic_iomem.c b/lib/logic_iomem.c index 9bdfde0c0f86..8c3365f26e51 100644 --- a/lib/logic_iomem.c +++ b/lib/logic_iomem.c @@ -21,15 +21,15 @@ struct logic_iomem_area { #define AREA_SHIFT 24 #define MAX_AREA_SIZE (1 << AREA_SHIFT) -#define MAX_AREAS ((1ULL<<32) / MAX_AREA_SIZE) +#define MAX_AREAS ((1U << 31) / MAX_AREA_SIZE) #define AREA_BITS ((MAX_AREAS - 1) << AREA_SHIFT) #define AREA_MASK (MAX_AREA_SIZE - 1) #ifdef 
CONFIG_64BIT #define IOREMAP_BIAS 0xDEAD000000000000UL #define IOREMAP_MASK 0xFFFFFFFF00000000UL #else -#define IOREMAP_BIAS 0 -#define IOREMAP_MASK 0 +#define IOREMAP_BIAS 0x80000000UL +#define IOREMAP_MASK 0x80000000UL #endif static DEFINE_MUTEX(regions_mtx); @@ -76,10 +76,10 @@ static void __iomem *real_ioremap(phys_addr_t offset, size_t size) return NULL; } -static void real_iounmap(void __iomem *addr) +static void real_iounmap(volatile void __iomem *addr) { WARN(1, "invalid iounmap for addr 0x%llx\n", - (unsigned long long __force)addr); + (unsigned long long)(uintptr_t __force)addr); } #endif /* CONFIG_LOGIC_IOMEM_FALLBACK */ @@ -149,7 +149,7 @@ get_area(const volatile void __iomem *addr) return NULL; } -void iounmap(void __iomem *addr) +void iounmap(volatile void __iomem *addr) { struct logic_iomem_area *area = get_area(addr); @@ -173,7 +173,7 @@ EXPORT_SYMBOL(iounmap); static u##sz real_raw_read ## op(const volatile void __iomem *addr) \ { \ WARN(1, "Invalid read" #op " at address %llx\n", \ - (unsigned long long __force)addr); \ + (unsigned long long)(uintptr_t __force)addr); \ return (u ## sz)~0ULL; \ } \ \ @@ -181,7 +181,8 @@ static void real_raw_write ## op(u ## sz val, \ volatile void __iomem *addr) \ { \ WARN(1, "Invalid writeq" #op " of 0x%llx at address %llx\n", \ - (unsigned long long)val, (unsigned long long __force)addr);\ + (unsigned long long)val, \ + (unsigned long long)(uintptr_t __force)addr);\ } \ MAKE_FALLBACK(b, 8); @@ -194,14 +195,14 @@ MAKE_FALLBACK(q, 64); static void real_memset_io(volatile void __iomem *addr, int value, size_t size) { WARN(1, "Invalid memset_io at address 0x%llx\n", - (unsigned long long __force)addr); + (unsigned long long)(uintptr_t __force)addr); } static void real_memcpy_fromio(void *buffer, const volatile void __iomem *addr, size_t size) { WARN(1, "Invalid memcpy_fromio at address 0x%llx\n", - (unsigned long long __force)addr); + (unsigned long long)(uintptr_t __force)addr); memset(buffer, 0xff, size); } @@ -210,7 +211,7 @@ static void real_memcpy_toio(volatile void __iomem *addr, const void *buffer, size_t size) { WARN(1, "Invalid memcpy_toio at address 0x%llx\n", - (unsigned long long __force)addr); + (unsigned long long)(uintptr_t __force)addr); } #endif /* CONFIG_LOGIC_IOMEM_FALLBACK */ diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h index 673bd206aa98..330aa539b46e 100644 --- a/lib/lz4/lz4defs.h +++ b/lib/lz4/lz4defs.h @@ -36,6 +36,8 @@ */ #include <asm/unaligned.h> + +#include <linux/bitops.h> #include <linux/string.h> /* memset, memcpy */ #define FORCE_INLINE __always_inline diff --git a/lib/mpi/mpi-bit.c b/lib/mpi/mpi-bit.c index 142b680835df..070ba784c9f1 100644 --- a/lib/mpi/mpi-bit.c +++ b/lib/mpi/mpi-bit.c @@ -242,6 +242,7 @@ void mpi_rshift(MPI x, MPI a, unsigned int n) } MPN_NORMALIZE(x->d, x->nlimbs); } +EXPORT_SYMBOL_GPL(mpi_rshift); /**************** * Shift A by COUNT limbs to the left diff --git a/lib/mpi/mpi-mod.c b/lib/mpi/mpi-mod.c index 47bc59edd4ff..54fcc01564d9 100644 --- a/lib/mpi/mpi-mod.c +++ b/lib/mpi/mpi-mod.c @@ -40,6 +40,8 @@ mpi_barrett_t mpi_barrett_init(MPI m, int copy) mpi_normalize(m); ctx = kcalloc(1, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return NULL; if (copy) { ctx->m = mpi_copy(m); diff --git a/lib/test_overflow.c b/lib/overflow_kunit.c index 7a4b6f6c5473..475f0c064bf6 100644 --- a/lib/test_overflow.c +++ b/lib/overflow_kunit.c @@ -1,11 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /* - * Test cases for arithmetic overflow checks. + * Test cases for arithmetic overflow checks. 
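 *
 * For orientation, a minimal sketch of the pattern these cases
 * exercise (the operand values are arbitrary examples, not taken
 * from the test tables below):
 *
 *	u8 sum;
 *
 *	if (check_add_overflow((u8)200, (u8)100, &sum))
 *		pr_info("overflowed; sum holds the wrapped value\n");
 *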
See: + * https://www.kernel.org/doc/html/latest/dev-tools/kunit/kunit-tool.html#configuring-building-and-running-tests + * ./tools/testing/kunit/kunit.py run overflow [--raw_output] */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <kunit/test.h> #include <linux/device.h> -#include <linux/init.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/module.h> @@ -19,7 +21,7 @@ t a, b; \ t sum, diff, prod; \ bool s_of, d_of, p_of; \ - } t ## _tests[] __initconst + } t ## _tests[] DEFINE_TEST_ARRAY(u8) = { {0, 0, 0, 0, 0, false, false, false}, @@ -220,43 +222,31 @@ DEFINE_TEST_ARRAY(s64) = { bool _of; \ \ _of = check_ ## op ## _overflow(a, b, &_r); \ - if (_of != of) { \ - pr_warn("expected "fmt" "sym" "fmt \ - " to%s overflow (type %s)\n", \ - a, b, of ? "" : " not", #t); \ - err = 1; \ - } \ - if (_r != r) { \ - pr_warn("expected "fmt" "sym" "fmt" == " \ - fmt", got "fmt" (type %s)\n", \ - a, b, r, _r, #t); \ - err = 1; \ - } \ + KUNIT_EXPECT_EQ_MSG(test, _of, of, \ + "expected "fmt" "sym" "fmt" to%s overflow (type %s)\n", \ + a, b, of ? "" : " not", #t); \ + KUNIT_EXPECT_EQ_MSG(test, _r, r, \ + "expected "fmt" "sym" "fmt" == "fmt", got "fmt" (type %s)\n", \ + a, b, r, _r, #t); \ } while (0) #define DEFINE_TEST_FUNC(t, fmt) \ -static int __init do_test_ ## t(const struct test_ ## t *p) \ +static void do_test_ ## t(struct kunit *test, const struct test_ ## t *p) \ { \ - int err = 0; \ - \ check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \ check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \ check_one_op(t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of); \ check_one_op(t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of); \ check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \ - \ - return err; \ } \ \ -static int __init test_ ## t ## _overflow(void) { \ - int err = 0; \ +static void t ## _overflow_test(struct kunit *test) { \ unsigned i; \ \ - pr_info("%-3s: %zu arithmetic tests\n", #t, \ - ARRAY_SIZE(t ## _tests)); \ for (i = 0; i < ARRAY_SIZE(t ## _tests); ++i) \ - err |= do_test_ ## t(&t ## _tests[i]); \ - return err; \ + do_test_ ## t(test, &t ## _tests[i]); \ + kunit_info(test, "%zu %s arithmetic tests finished\n", \ + ARRAY_SIZE(t ## _tests), #t); \ } DEFINE_TEST_FUNC(u8, "%d"); @@ -270,199 +260,176 @@ DEFINE_TEST_FUNC(u64, "%llu"); DEFINE_TEST_FUNC(s64, "%lld"); #endif -static int __init test_overflow_calculation(void) -{ - int err = 0; - - err |= test_u8_overflow(); - err |= test_s8_overflow(); - err |= test_u16_overflow(); - err |= test_s16_overflow(); - err |= test_u32_overflow(); - err |= test_s32_overflow(); -#if BITS_PER_LONG == 64 - err |= test_u64_overflow(); - err |= test_s64_overflow(); -#endif - - return err; -} - -static int __init test_overflow_shift(void) +static void overflow_shift_test(struct kunit *test) { - int err = 0; + int count = 0; /* Args are: value, shift, type, expected result, overflow expected */ -#define TEST_ONE_SHIFT(a, s, t, expect, of) ({ \ - int __failed = 0; \ +#define TEST_ONE_SHIFT(a, s, t, expect, of) do { \ typeof(a) __a = (a); \ typeof(s) __s = (s); \ t __e = (expect); \ t __d; \ bool __of = check_shl_overflow(__a, __s, &__d); \ if (__of != of) { \ - pr_warn("expected (%s)(%s << %s) to%s overflow\n", \ + KUNIT_EXPECT_EQ_MSG(test, __of, of, \ + "expected (%s)(%s << %s) to%s overflow\n", \ #t, #a, #s, of ? 
"" : " not"); \ - __failed = 1; \ } else if (!__of && __d != __e) { \ - pr_warn("expected (%s)(%s << %s) == %s\n", \ + KUNIT_EXPECT_EQ_MSG(test, __d, __e, \ + "expected (%s)(%s << %s) == %s\n", \ #t, #a, #s, #expect); \ if ((t)-1 < 0) \ - pr_warn("got %lld\n", (s64)__d); \ + kunit_info(test, "got %lld\n", (s64)__d); \ else \ - pr_warn("got %llu\n", (u64)__d); \ - __failed = 1; \ + kunit_info(test, "got %llu\n", (u64)__d); \ } \ - if (!__failed) \ - pr_info("ok: (%s)(%s << %s) == %s\n", #t, #a, #s, \ - of ? "overflow" : #expect); \ - __failed; \ -}) + count++; \ +} while (0) /* Sane shifts. */ - err |= TEST_ONE_SHIFT(1, 0, u8, 1 << 0, false); - err |= TEST_ONE_SHIFT(1, 4, u8, 1 << 4, false); - err |= TEST_ONE_SHIFT(1, 7, u8, 1 << 7, false); - err |= TEST_ONE_SHIFT(0xF, 4, u8, 0xF << 4, false); - err |= TEST_ONE_SHIFT(1, 0, u16, 1 << 0, false); - err |= TEST_ONE_SHIFT(1, 10, u16, 1 << 10, false); - err |= TEST_ONE_SHIFT(1, 15, u16, 1 << 15, false); - err |= TEST_ONE_SHIFT(0xFF, 8, u16, 0xFF << 8, false); - err |= TEST_ONE_SHIFT(1, 0, int, 1 << 0, false); - err |= TEST_ONE_SHIFT(1, 16, int, 1 << 16, false); - err |= TEST_ONE_SHIFT(1, 30, int, 1 << 30, false); - err |= TEST_ONE_SHIFT(1, 0, s32, 1 << 0, false); - err |= TEST_ONE_SHIFT(1, 16, s32, 1 << 16, false); - err |= TEST_ONE_SHIFT(1, 30, s32, 1 << 30, false); - err |= TEST_ONE_SHIFT(1, 0, unsigned int, 1U << 0, false); - err |= TEST_ONE_SHIFT(1, 20, unsigned int, 1U << 20, false); - err |= TEST_ONE_SHIFT(1, 31, unsigned int, 1U << 31, false); - err |= TEST_ONE_SHIFT(0xFFFFU, 16, unsigned int, 0xFFFFU << 16, false); - err |= TEST_ONE_SHIFT(1, 0, u32, 1U << 0, false); - err |= TEST_ONE_SHIFT(1, 20, u32, 1U << 20, false); - err |= TEST_ONE_SHIFT(1, 31, u32, 1U << 31, false); - err |= TEST_ONE_SHIFT(0xFFFFU, 16, u32, 0xFFFFU << 16, false); - err |= TEST_ONE_SHIFT(1, 0, u64, 1ULL << 0, false); - err |= TEST_ONE_SHIFT(1, 40, u64, 1ULL << 40, false); - err |= TEST_ONE_SHIFT(1, 63, u64, 1ULL << 63, false); - err |= TEST_ONE_SHIFT(0xFFFFFFFFULL, 32, u64, - 0xFFFFFFFFULL << 32, false); + TEST_ONE_SHIFT(1, 0, u8, 1 << 0, false); + TEST_ONE_SHIFT(1, 4, u8, 1 << 4, false); + TEST_ONE_SHIFT(1, 7, u8, 1 << 7, false); + TEST_ONE_SHIFT(0xF, 4, u8, 0xF << 4, false); + TEST_ONE_SHIFT(1, 0, u16, 1 << 0, false); + TEST_ONE_SHIFT(1, 10, u16, 1 << 10, false); + TEST_ONE_SHIFT(1, 15, u16, 1 << 15, false); + TEST_ONE_SHIFT(0xFF, 8, u16, 0xFF << 8, false); + TEST_ONE_SHIFT(1, 0, int, 1 << 0, false); + TEST_ONE_SHIFT(1, 16, int, 1 << 16, false); + TEST_ONE_SHIFT(1, 30, int, 1 << 30, false); + TEST_ONE_SHIFT(1, 0, s32, 1 << 0, false); + TEST_ONE_SHIFT(1, 16, s32, 1 << 16, false); + TEST_ONE_SHIFT(1, 30, s32, 1 << 30, false); + TEST_ONE_SHIFT(1, 0, unsigned int, 1U << 0, false); + TEST_ONE_SHIFT(1, 20, unsigned int, 1U << 20, false); + TEST_ONE_SHIFT(1, 31, unsigned int, 1U << 31, false); + TEST_ONE_SHIFT(0xFFFFU, 16, unsigned int, 0xFFFFU << 16, false); + TEST_ONE_SHIFT(1, 0, u32, 1U << 0, false); + TEST_ONE_SHIFT(1, 20, u32, 1U << 20, false); + TEST_ONE_SHIFT(1, 31, u32, 1U << 31, false); + TEST_ONE_SHIFT(0xFFFFU, 16, u32, 0xFFFFU << 16, false); + TEST_ONE_SHIFT(1, 0, u64, 1ULL << 0, false); + TEST_ONE_SHIFT(1, 40, u64, 1ULL << 40, false); + TEST_ONE_SHIFT(1, 63, u64, 1ULL << 63, false); + TEST_ONE_SHIFT(0xFFFFFFFFULL, 32, u64, 0xFFFFFFFFULL << 32, false); /* Sane shift: start and end with 0, without a too-wide shift. 
*/ - err |= TEST_ONE_SHIFT(0, 7, u8, 0, false); - err |= TEST_ONE_SHIFT(0, 15, u16, 0, false); - err |= TEST_ONE_SHIFT(0, 31, unsigned int, 0, false); - err |= TEST_ONE_SHIFT(0, 31, u32, 0, false); - err |= TEST_ONE_SHIFT(0, 63, u64, 0, false); + TEST_ONE_SHIFT(0, 7, u8, 0, false); + TEST_ONE_SHIFT(0, 15, u16, 0, false); + TEST_ONE_SHIFT(0, 31, unsigned int, 0, false); + TEST_ONE_SHIFT(0, 31, u32, 0, false); + TEST_ONE_SHIFT(0, 63, u64, 0, false); /* Sane shift: start and end with 0, without reaching signed bit. */ - err |= TEST_ONE_SHIFT(0, 6, s8, 0, false); - err |= TEST_ONE_SHIFT(0, 14, s16, 0, false); - err |= TEST_ONE_SHIFT(0, 30, int, 0, false); - err |= TEST_ONE_SHIFT(0, 30, s32, 0, false); - err |= TEST_ONE_SHIFT(0, 62, s64, 0, false); + TEST_ONE_SHIFT(0, 6, s8, 0, false); + TEST_ONE_SHIFT(0, 14, s16, 0, false); + TEST_ONE_SHIFT(0, 30, int, 0, false); + TEST_ONE_SHIFT(0, 30, s32, 0, false); + TEST_ONE_SHIFT(0, 62, s64, 0, false); /* Overflow: shifted the bit off the end. */ - err |= TEST_ONE_SHIFT(1, 8, u8, 0, true); - err |= TEST_ONE_SHIFT(1, 16, u16, 0, true); - err |= TEST_ONE_SHIFT(1, 32, unsigned int, 0, true); - err |= TEST_ONE_SHIFT(1, 32, u32, 0, true); - err |= TEST_ONE_SHIFT(1, 64, u64, 0, true); + TEST_ONE_SHIFT(1, 8, u8, 0, true); + TEST_ONE_SHIFT(1, 16, u16, 0, true); + TEST_ONE_SHIFT(1, 32, unsigned int, 0, true); + TEST_ONE_SHIFT(1, 32, u32, 0, true); + TEST_ONE_SHIFT(1, 64, u64, 0, true); /* Overflow: shifted into the signed bit. */ - err |= TEST_ONE_SHIFT(1, 7, s8, 0, true); - err |= TEST_ONE_SHIFT(1, 15, s16, 0, true); - err |= TEST_ONE_SHIFT(1, 31, int, 0, true); - err |= TEST_ONE_SHIFT(1, 31, s32, 0, true); - err |= TEST_ONE_SHIFT(1, 63, s64, 0, true); + TEST_ONE_SHIFT(1, 7, s8, 0, true); + TEST_ONE_SHIFT(1, 15, s16, 0, true); + TEST_ONE_SHIFT(1, 31, int, 0, true); + TEST_ONE_SHIFT(1, 31, s32, 0, true); + TEST_ONE_SHIFT(1, 63, s64, 0, true); /* Overflow: high bit falls off unsigned types. */ /* 10010110 */ - err |= TEST_ONE_SHIFT(150, 1, u8, 0, true); + TEST_ONE_SHIFT(150, 1, u8, 0, true); /* 1000100010010110 */ - err |= TEST_ONE_SHIFT(34966, 1, u16, 0, true); + TEST_ONE_SHIFT(34966, 1, u16, 0, true); /* 10000100000010001000100010010110 */ - err |= TEST_ONE_SHIFT(2215151766U, 1, u32, 0, true); - err |= TEST_ONE_SHIFT(2215151766U, 1, unsigned int, 0, true); + TEST_ONE_SHIFT(2215151766U, 1, u32, 0, true); + TEST_ONE_SHIFT(2215151766U, 1, unsigned int, 0, true); /* 1000001000010000010000000100000010000100000010001000100010010110 */ - err |= TEST_ONE_SHIFT(9372061470395238550ULL, 1, u64, 0, true); + TEST_ONE_SHIFT(9372061470395238550ULL, 1, u64, 0, true); /* Overflow: bit shifted into signed bit on signed types. */ /* 01001011 */ - err |= TEST_ONE_SHIFT(75, 1, s8, 0, true); + TEST_ONE_SHIFT(75, 1, s8, 0, true); /* 0100010001001011 */ - err |= TEST_ONE_SHIFT(17483, 1, s16, 0, true); + TEST_ONE_SHIFT(17483, 1, s16, 0, true); /* 01000010000001000100010001001011 */ - err |= TEST_ONE_SHIFT(1107575883, 1, s32, 0, true); - err |= TEST_ONE_SHIFT(1107575883, 1, int, 0, true); + TEST_ONE_SHIFT(1107575883, 1, s32, 0, true); + TEST_ONE_SHIFT(1107575883, 1, int, 0, true); /* 0100000100001000001000000010000001000010000001000100010001001011 */ - err |= TEST_ONE_SHIFT(4686030735197619275LL, 1, s64, 0, true); + TEST_ONE_SHIFT(4686030735197619275LL, 1, s64, 0, true); /* Overflow: bit shifted past signed bit on signed types. 
*/ /* 01001011 */ - err |= TEST_ONE_SHIFT(75, 2, s8, 0, true); + TEST_ONE_SHIFT(75, 2, s8, 0, true); /* 0100010001001011 */ - err |= TEST_ONE_SHIFT(17483, 2, s16, 0, true); + TEST_ONE_SHIFT(17483, 2, s16, 0, true); /* 01000010000001000100010001001011 */ - err |= TEST_ONE_SHIFT(1107575883, 2, s32, 0, true); - err |= TEST_ONE_SHIFT(1107575883, 2, int, 0, true); + TEST_ONE_SHIFT(1107575883, 2, s32, 0, true); + TEST_ONE_SHIFT(1107575883, 2, int, 0, true); /* 0100000100001000001000000010000001000010000001000100010001001011 */ - err |= TEST_ONE_SHIFT(4686030735197619275LL, 2, s64, 0, true); + TEST_ONE_SHIFT(4686030735197619275LL, 2, s64, 0, true); /* Overflow: values larger than destination type. */ - err |= TEST_ONE_SHIFT(0x100, 0, u8, 0, true); - err |= TEST_ONE_SHIFT(0xFF, 0, s8, 0, true); - err |= TEST_ONE_SHIFT(0x10000U, 0, u16, 0, true); - err |= TEST_ONE_SHIFT(0xFFFFU, 0, s16, 0, true); - err |= TEST_ONE_SHIFT(0x100000000ULL, 0, u32, 0, true); - err |= TEST_ONE_SHIFT(0x100000000ULL, 0, unsigned int, 0, true); - err |= TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, s32, 0, true); - err |= TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, int, 0, true); - err |= TEST_ONE_SHIFT(0xFFFFFFFFFFFFFFFFULL, 0, s64, 0, true); + TEST_ONE_SHIFT(0x100, 0, u8, 0, true); + TEST_ONE_SHIFT(0xFF, 0, s8, 0, true); + TEST_ONE_SHIFT(0x10000U, 0, u16, 0, true); + TEST_ONE_SHIFT(0xFFFFU, 0, s16, 0, true); + TEST_ONE_SHIFT(0x100000000ULL, 0, u32, 0, true); + TEST_ONE_SHIFT(0x100000000ULL, 0, unsigned int, 0, true); + TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, s32, 0, true); + TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, int, 0, true); + TEST_ONE_SHIFT(0xFFFFFFFFFFFFFFFFULL, 0, s64, 0, true); /* Nonsense: negative initial value. */ - err |= TEST_ONE_SHIFT(-1, 0, s8, 0, true); - err |= TEST_ONE_SHIFT(-1, 0, u8, 0, true); - err |= TEST_ONE_SHIFT(-5, 0, s16, 0, true); - err |= TEST_ONE_SHIFT(-5, 0, u16, 0, true); - err |= TEST_ONE_SHIFT(-10, 0, int, 0, true); - err |= TEST_ONE_SHIFT(-10, 0, unsigned int, 0, true); - err |= TEST_ONE_SHIFT(-100, 0, s32, 0, true); - err |= TEST_ONE_SHIFT(-100, 0, u32, 0, true); - err |= TEST_ONE_SHIFT(-10000, 0, s64, 0, true); - err |= TEST_ONE_SHIFT(-10000, 0, u64, 0, true); + TEST_ONE_SHIFT(-1, 0, s8, 0, true); + TEST_ONE_SHIFT(-1, 0, u8, 0, true); + TEST_ONE_SHIFT(-5, 0, s16, 0, true); + TEST_ONE_SHIFT(-5, 0, u16, 0, true); + TEST_ONE_SHIFT(-10, 0, int, 0, true); + TEST_ONE_SHIFT(-10, 0, unsigned int, 0, true); + TEST_ONE_SHIFT(-100, 0, s32, 0, true); + TEST_ONE_SHIFT(-100, 0, u32, 0, true); + TEST_ONE_SHIFT(-10000, 0, s64, 0, true); + TEST_ONE_SHIFT(-10000, 0, u64, 0, true); /* Nonsense: negative shift values. */ - err |= TEST_ONE_SHIFT(0, -5, s8, 0, true); - err |= TEST_ONE_SHIFT(0, -5, u8, 0, true); - err |= TEST_ONE_SHIFT(0, -10, s16, 0, true); - err |= TEST_ONE_SHIFT(0, -10, u16, 0, true); - err |= TEST_ONE_SHIFT(0, -15, int, 0, true); - err |= TEST_ONE_SHIFT(0, -15, unsigned int, 0, true); - err |= TEST_ONE_SHIFT(0, -20, s32, 0, true); - err |= TEST_ONE_SHIFT(0, -20, u32, 0, true); - err |= TEST_ONE_SHIFT(0, -30, s64, 0, true); - err |= TEST_ONE_SHIFT(0, -30, u64, 0, true); + TEST_ONE_SHIFT(0, -5, s8, 0, true); + TEST_ONE_SHIFT(0, -5, u8, 0, true); + TEST_ONE_SHIFT(0, -10, s16, 0, true); + TEST_ONE_SHIFT(0, -10, u16, 0, true); + TEST_ONE_SHIFT(0, -15, int, 0, true); + TEST_ONE_SHIFT(0, -15, unsigned int, 0, true); + TEST_ONE_SHIFT(0, -20, s32, 0, true); + TEST_ONE_SHIFT(0, -20, u32, 0, true); + TEST_ONE_SHIFT(0, -30, s64, 0, true); + TEST_ONE_SHIFT(0, -30, u64, 0, true); /* Overflow: shifted at or beyond entire type's bit width. 
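 *
 * Note that even a zero value trips this case (sketch): the shift
 * width itself is rejected, independent of the value being shifted:
 *
 *	u64 d;
 *	bool ov = check_shl_overflow((u64)0, 64, &d);	// ov == true
 *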
*/ - err |= TEST_ONE_SHIFT(0, 8, u8, 0, true); - err |= TEST_ONE_SHIFT(0, 9, u8, 0, true); - err |= TEST_ONE_SHIFT(0, 8, s8, 0, true); - err |= TEST_ONE_SHIFT(0, 9, s8, 0, true); - err |= TEST_ONE_SHIFT(0, 16, u16, 0, true); - err |= TEST_ONE_SHIFT(0, 17, u16, 0, true); - err |= TEST_ONE_SHIFT(0, 16, s16, 0, true); - err |= TEST_ONE_SHIFT(0, 17, s16, 0, true); - err |= TEST_ONE_SHIFT(0, 32, u32, 0, true); - err |= TEST_ONE_SHIFT(0, 33, u32, 0, true); - err |= TEST_ONE_SHIFT(0, 32, int, 0, true); - err |= TEST_ONE_SHIFT(0, 33, int, 0, true); - err |= TEST_ONE_SHIFT(0, 32, s32, 0, true); - err |= TEST_ONE_SHIFT(0, 33, s32, 0, true); - err |= TEST_ONE_SHIFT(0, 64, u64, 0, true); - err |= TEST_ONE_SHIFT(0, 65, u64, 0, true); - err |= TEST_ONE_SHIFT(0, 64, s64, 0, true); - err |= TEST_ONE_SHIFT(0, 65, s64, 0, true); + TEST_ONE_SHIFT(0, 8, u8, 0, true); + TEST_ONE_SHIFT(0, 9, u8, 0, true); + TEST_ONE_SHIFT(0, 8, s8, 0, true); + TEST_ONE_SHIFT(0, 9, s8, 0, true); + TEST_ONE_SHIFT(0, 16, u16, 0, true); + TEST_ONE_SHIFT(0, 17, u16, 0, true); + TEST_ONE_SHIFT(0, 16, s16, 0, true); + TEST_ONE_SHIFT(0, 17, s16, 0, true); + TEST_ONE_SHIFT(0, 32, u32, 0, true); + TEST_ONE_SHIFT(0, 33, u32, 0, true); + TEST_ONE_SHIFT(0, 32, int, 0, true); + TEST_ONE_SHIFT(0, 33, int, 0, true); + TEST_ONE_SHIFT(0, 32, s32, 0, true); + TEST_ONE_SHIFT(0, 33, s32, 0, true); + TEST_ONE_SHIFT(0, 64, u64, 0, true); + TEST_ONE_SHIFT(0, 65, u64, 0, true); + TEST_ONE_SHIFT(0, 64, s64, 0, true); + TEST_ONE_SHIFT(0, 65, s64, 0, true); /* * Corner case: for unsigned types, we fail when we've shifted @@ -473,13 +440,14 @@ static int __init test_overflow_shift(void) * signed bit). So, for now, we will test this condition but * mark it as not expected to overflow. */ - err |= TEST_ONE_SHIFT(0, 7, s8, 0, false); - err |= TEST_ONE_SHIFT(0, 15, s16, 0, false); - err |= TEST_ONE_SHIFT(0, 31, int, 0, false); - err |= TEST_ONE_SHIFT(0, 31, s32, 0, false); - err |= TEST_ONE_SHIFT(0, 63, s64, 0, false); - - return err; + TEST_ONE_SHIFT(0, 7, s8, 0, false); + TEST_ONE_SHIFT(0, 15, s16, 0, false); + TEST_ONE_SHIFT(0, 31, int, 0, false); + TEST_ONE_SHIFT(0, 31, s32, 0, false); + TEST_ONE_SHIFT(0, 63, s64, 0, false); + + kunit_info(test, "%d shift tests finished\n", count); +#undef TEST_ONE_SHIFT } /* @@ -499,7 +467,7 @@ static int __init test_overflow_shift(void) #define TEST_SIZE (5 * 4096) #define DEFINE_TEST_ALLOC(func, free_func, want_arg, want_gfp, want_node)\ -static int __init test_ ## func (void *arg) \ +static void test_ ## func (struct kunit *test, void *arg) \ { \ volatile size_t a = TEST_SIZE; \ volatile size_t b = (SIZE_MAX / TEST_SIZE) + 1; \ @@ -507,31 +475,24 @@ static int __init test_ ## func (void *arg) \ \ /* Tiny allocation test. */ \ ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, 1);\ - if (!ptr) { \ - pr_warn(#func " failed regular allocation?!\n"); \ - return 1; \ - } \ + KUNIT_ASSERT_NOT_ERR_OR_NULL_MSG(test, ptr, \ + #func " failed regular allocation?!\n"); \ free ## want_arg (free_func, arg, ptr); \ \ /* Wrapped allocation test. */ \ ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \ a * b); \ - if (!ptr) { \ - pr_warn(#func " unexpectedly failed bad wrapping?!\n"); \ - return 1; \ - } \ + KUNIT_ASSERT_NOT_ERR_OR_NULL_MSG(test, ptr, \ + #func " unexpectedly failed bad wrapping?!\n"); \ free ## want_arg (free_func, arg, ptr); \ \ /* Saturated allocation test. 
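 *
 * This relies on array_size(a, b) saturating to SIZE_MAX when
 * a * b would wrap, so the request becomes impossibly large and the
 * allocator has to refuse it; in ordinary code (sketch):
 *
 *	ptr = kmalloc(array_size(a, b), GFP_KERNEL);
 *	// on overflow: size == SIZE_MAX and kmalloc() returns NULL
 *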
*/ \ ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \ array_size(a, b)); \ if (ptr) { \ - pr_warn(#func " missed saturation!\n"); \ + KUNIT_FAIL(test, #func " missed saturation!\n"); \ free ## want_arg (free_func, arg, ptr); \ - return 1; \ } \ - pr_info(#func " detected saturation\n"); \ - return 0; \ } /* @@ -544,10 +505,7 @@ DEFINE_TEST_ALLOC(kmalloc, kfree, 0, 1, 0); DEFINE_TEST_ALLOC(kmalloc_node, kfree, 0, 1, 1); DEFINE_TEST_ALLOC(kzalloc, kfree, 0, 1, 0); DEFINE_TEST_ALLOC(kzalloc_node, kfree, 0, 1, 1); -DEFINE_TEST_ALLOC(vmalloc, vfree, 0, 0, 0); -DEFINE_TEST_ALLOC(vmalloc_node, vfree, 0, 0, 1); -DEFINE_TEST_ALLOC(vzalloc, vfree, 0, 0, 0); -DEFINE_TEST_ALLOC(vzalloc_node, vfree, 0, 0, 1); +DEFINE_TEST_ALLOC(__vmalloc, vfree, 0, 1, 0); DEFINE_TEST_ALLOC(kvmalloc, kvfree, 0, 1, 0); DEFINE_TEST_ALLOC(kvmalloc_node, kvfree, 0, 1, 1); DEFINE_TEST_ALLOC(kvzalloc, kvfree, 0, 1, 0); @@ -555,60 +513,158 @@ DEFINE_TEST_ALLOC(kvzalloc_node, kvfree, 0, 1, 1); DEFINE_TEST_ALLOC(devm_kmalloc, devm_kfree, 1, 1, 0); DEFINE_TEST_ALLOC(devm_kzalloc, devm_kfree, 1, 1, 0); -static int __init test_overflow_allocation(void) +static void overflow_allocation_test(struct kunit *test) { const char device_name[] = "overflow-test"; struct device *dev; - int err = 0; + int count = 0; + +#define check_allocation_overflow(alloc) do { \ + count++; \ + test_ ## alloc(test, dev); \ +} while (0) /* Create dummy device for devm_kmalloc()-family tests. */ dev = root_device_register(device_name); - if (IS_ERR(dev)) { - pr_warn("Cannot register test device\n"); - return 1; - } - - err |= test_kmalloc(NULL); - err |= test_kmalloc_node(NULL); - err |= test_kzalloc(NULL); - err |= test_kzalloc_node(NULL); - err |= test_kvmalloc(NULL); - err |= test_kvmalloc_node(NULL); - err |= test_kvzalloc(NULL); - err |= test_kvzalloc_node(NULL); - err |= test_vmalloc(NULL); - err |= test_vmalloc_node(NULL); - err |= test_vzalloc(NULL); - err |= test_vzalloc_node(NULL); - err |= test_devm_kmalloc(dev); - err |= test_devm_kzalloc(dev); + KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev), + "Cannot register test device\n"); + + check_allocation_overflow(kmalloc); + check_allocation_overflow(kmalloc_node); + check_allocation_overflow(kzalloc); + check_allocation_overflow(kzalloc_node); + check_allocation_overflow(__vmalloc); + check_allocation_overflow(kvmalloc); + check_allocation_overflow(kvmalloc_node); + check_allocation_overflow(kvzalloc); + check_allocation_overflow(kvzalloc_node); + check_allocation_overflow(devm_kmalloc); + check_allocation_overflow(devm_kzalloc); device_unregister(dev); - return err; + kunit_info(test, "%d allocation overflow tests finished\n", count); +#undef check_allocation_overflow } -static int __init test_module_init(void) +struct __test_flex_array { + unsigned long flags; + size_t count; + unsigned long data[]; +}; + +static void overflow_size_helpers_test(struct kunit *test) { - int err = 0; + /* Make sure struct_size() can be used in a constant expression. */ + u8 ce_array[struct_size((struct __test_flex_array *)0, data, 55)]; + struct __test_flex_array *obj; + int count = 0; + int var; + volatile int unconst = 0; + + /* Verify constant expression against runtime version. */ + var = 55; + OPTIMIZER_HIDE_VAR(var); + KUNIT_EXPECT_EQ(test, sizeof(ce_array), struct_size(obj, data, var)); + +#define check_one_size_helper(expected, func, args...) 
do { \ + size_t _r = func(args); \ + KUNIT_EXPECT_EQ_MSG(test, _r, expected, \ + "expected " #func "(" #args ") to return %zu but got %zu instead\n", \ + (size_t)(expected), _r); \ + count++; \ +} while (0) - err |= test_overflow_calculation(); - err |= test_overflow_shift(); - err |= test_overflow_allocation(); + var = 4; + check_one_size_helper(20, size_mul, var++, 5); + check_one_size_helper(20, size_mul, 4, var++); + check_one_size_helper(0, size_mul, 0, 3); + check_one_size_helper(0, size_mul, 3, 0); + check_one_size_helper(6, size_mul, 2, 3); + check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 1); + check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 3); + check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, -3); + + var = 4; + check_one_size_helper(9, size_add, var++, 5); + check_one_size_helper(9, size_add, 4, var++); + check_one_size_helper(9, size_add, 9, 0); + check_one_size_helper(9, size_add, 0, 9); + check_one_size_helper(5, size_add, 2, 3); + check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, 1); + check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, 3); + check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, -3); + + var = 4; + check_one_size_helper(1, size_sub, var--, 3); + check_one_size_helper(1, size_sub, 4, var--); + check_one_size_helper(1, size_sub, 3, 2); + check_one_size_helper(9, size_sub, 9, 0); + check_one_size_helper(SIZE_MAX, size_sub, 9, -3); + check_one_size_helper(SIZE_MAX, size_sub, 0, 9); + check_one_size_helper(SIZE_MAX, size_sub, 2, 3); + check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 0); + check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 10); + check_one_size_helper(SIZE_MAX, size_sub, 0, SIZE_MAX); + check_one_size_helper(SIZE_MAX, size_sub, 14, SIZE_MAX); + check_one_size_helper(SIZE_MAX - 2, size_sub, SIZE_MAX - 1, 1); + check_one_size_helper(SIZE_MAX - 4, size_sub, SIZE_MAX - 1, 3); + check_one_size_helper(1, size_sub, SIZE_MAX - 1, -3); + + var = 4; + check_one_size_helper(4 * sizeof(*obj->data), + flex_array_size, obj, data, var++); + check_one_size_helper(5 * sizeof(*obj->data), + flex_array_size, obj, data, var++); + check_one_size_helper(0, flex_array_size, obj, data, 0 + unconst); + check_one_size_helper(sizeof(*obj->data), + flex_array_size, obj, data, 1 + unconst); + check_one_size_helper(7 * sizeof(*obj->data), + flex_array_size, obj, data, 7 + unconst); + check_one_size_helper(SIZE_MAX, + flex_array_size, obj, data, -1 + unconst); + check_one_size_helper(SIZE_MAX, + flex_array_size, obj, data, SIZE_MAX - 4 + unconst); + + var = 4; + check_one_size_helper(sizeof(*obj) + (4 * sizeof(*obj->data)), + struct_size, obj, data, var++); + check_one_size_helper(sizeof(*obj) + (5 * sizeof(*obj->data)), + struct_size, obj, data, var++); + check_one_size_helper(sizeof(*obj), struct_size, obj, data, 0 + unconst); + check_one_size_helper(sizeof(*obj) + sizeof(*obj->data), + struct_size, obj, data, 1 + unconst); + check_one_size_helper(SIZE_MAX, + struct_size, obj, data, -3 + unconst); + check_one_size_helper(SIZE_MAX, + struct_size, obj, data, SIZE_MAX - 3 + unconst); + + kunit_info(test, "%d overflow size helper tests finished\n", count); +#undef check_one_size_helper +} - if (err) { - pr_warn("FAIL!\n"); - err = -EINVAL; - } else { - pr_info("all tests passed\n"); - } +static struct kunit_case overflow_test_cases[] = { + KUNIT_CASE(u8_overflow_test), + KUNIT_CASE(s8_overflow_test), + KUNIT_CASE(u16_overflow_test), + KUNIT_CASE(s16_overflow_test), + KUNIT_CASE(u32_overflow_test), + KUNIT_CASE(s32_overflow_test), +#if BITS_PER_LONG == 64 + 
KUNIT_CASE(u64_overflow_test), + KUNIT_CASE(s64_overflow_test), +#endif + KUNIT_CASE(overflow_shift_test), + KUNIT_CASE(overflow_allocation_test), + KUNIT_CASE(overflow_size_helpers_test), + {} +}; - return err; -} +static struct kunit_suite overflow_test_suite = { + .name = "overflow", + .test_cases = overflow_test_cases, +}; -static void __exit test_module_exit(void) -{ } +kunit_test_suite(overflow_test_suite); -module_init(test_module_init); -module_exit(test_module_exit); MODULE_LICENSE("Dual MIT/GPL"); diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c index 6d5e5000fdd7..39b74221f4a7 100644 --- a/lib/raid6/algos.c +++ b/lib/raid6/algos.c @@ -145,13 +145,13 @@ static inline const struct raid6_recov_calls *raid6_choose_recov(void) static inline const struct raid6_calls *raid6_choose_gen( void *(*const dptrs)[RAID6_TEST_DISKS], const int disks) { - unsigned long perf, bestgenperf, bestxorperf, j0, j1; + unsigned long perf, bestgenperf, j0, j1; int start = (disks>>1)-1, stop = disks-3; /* work on the second half of the disks */ const struct raid6_calls *const *algo; const struct raid6_calls *best; - for (bestgenperf = 0, bestxorperf = 0, best = NULL, algo = raid6_algos; *algo; algo++) { - if (!best || (*algo)->prefer >= best->prefer) { + for (bestgenperf = 0, best = NULL, algo = raid6_algos; *algo; algo++) { + if (!best || (*algo)->priority >= best->priority) { if ((*algo)->valid && !(*algo)->valid()) continue; @@ -180,50 +180,48 @@ static inline const struct raid6_calls *raid6_choose_gen( pr_info("raid6: %-8s gen() %5ld MB/s\n", (*algo)->name, (perf * HZ * (disks-2)) >> (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2)); + } + } - if (!(*algo)->xor_syndrome) - continue; + if (!best) { + pr_err("raid6: Yikes! No algorithm found!\n"); + goto out; + } - perf = 0; + raid6_call = *best; - preempt_disable(); - j0 = jiffies; - while ((j1 = jiffies) == j0) - cpu_relax(); - while (time_before(jiffies, - j1 + (1<<RAID6_TIME_JIFFIES_LG2))) { - (*algo)->xor_syndrome(disks, start, stop, - PAGE_SIZE, *dptrs); - perf++; - } - preempt_enable(); - - if (best == *algo) - bestxorperf = perf; + if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) { + pr_info("raid6: skipped pq benchmark and selected %s\n", + best->name); + goto out; + } - pr_info("raid6: %-8s xor() %5ld MB/s\n", (*algo)->name, - (perf * HZ * (disks-2)) >> - (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2 + 1)); + pr_info("raid6: using algorithm %s gen() %ld MB/s\n", + best->name, + (bestgenperf * HZ * (disks - 2)) >> + (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2)); + + if (best->xor_syndrome) { + perf = 0; + + preempt_disable(); + j0 = jiffies; + while ((j1 = jiffies) == j0) + cpu_relax(); + while (time_before(jiffies, + j1 + (1 << RAID6_TIME_JIFFIES_LG2))) { + best->xor_syndrome(disks, start, stop, + PAGE_SIZE, *dptrs); + perf++; } - } + preempt_enable(); - if (best) { - if (IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) { - pr_info("raid6: using algorithm %s gen() %ld MB/s\n", - best->name, - (bestgenperf * HZ * (disks-2)) >> - (20 - PAGE_SHIFT+RAID6_TIME_JIFFIES_LG2)); - if (best->xor_syndrome) - pr_info("raid6: .... xor() %ld MB/s, rmw enabled\n", - (bestxorperf * HZ * (disks-2)) >> - (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2 + 1)); - } else - pr_info("raid6: skip pq benchmark and using algorithm %s\n", - best->name); - raid6_call = *best; - } else - pr_err("raid6: Yikes! No algorithm found!\n"); + pr_info("raid6: .... 
xor() %ld MB/s, rmw enabled\n", + (perf * HZ * (disks - 2)) >> + (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2 + 1)); + } +out: return best; } diff --git a/lib/raid6/avx2.c b/lib/raid6/avx2.c index f299476e1d76..059024234dce 100644 --- a/lib/raid6/avx2.c +++ b/lib/raid6/avx2.c @@ -132,7 +132,7 @@ const struct raid6_calls raid6_avx2x1 = { raid6_avx21_xor_syndrome, raid6_have_avx2, "avx2x1", - 1 /* Has cache hints */ + .priority = 2 /* Prefer AVX2 over priority 1 (SSE2 and others) */ }; /* @@ -262,7 +262,7 @@ const struct raid6_calls raid6_avx2x2 = { raid6_avx22_xor_syndrome, raid6_have_avx2, "avx2x2", - 1 /* Has cache hints */ + .priority = 2 /* Prefer AVX2 over priority 1 (SSE2 and others) */ }; #ifdef CONFIG_X86_64 @@ -465,6 +465,6 @@ const struct raid6_calls raid6_avx2x4 = { raid6_avx24_xor_syndrome, raid6_have_avx2, "avx2x4", - 1 /* Has cache hints */ + .priority = 2 /* Prefer AVX2 over priority 1 (SSE2 and others) */ }; -#endif +#endif /* CONFIG_X86_64 */ diff --git a/lib/raid6/avx512.c b/lib/raid6/avx512.c index bb684d144ee2..9c3e822e1adf 100644 --- a/lib/raid6/avx512.c +++ b/lib/raid6/avx512.c @@ -162,7 +162,7 @@ const struct raid6_calls raid6_avx512x1 = { raid6_avx5121_xor_syndrome, raid6_have_avx512, "avx512x1", - 1 /* Has cache hints */ + .priority = 2 /* Prefer AVX512 over priority 1 (SSE2 and others) */ }; /* @@ -319,7 +319,7 @@ const struct raid6_calls raid6_avx512x2 = { raid6_avx5122_xor_syndrome, raid6_have_avx512, "avx512x2", - 1 /* Has cache hints */ + .priority = 2 /* Prefer AVX512 over priority 1 (SSE2 and others) */ }; #ifdef CONFIG_X86_64 @@ -557,7 +557,7 @@ const struct raid6_calls raid6_avx512x4 = { raid6_avx5124_xor_syndrome, raid6_have_avx512, "avx512x4", - 1 /* Has cache hints */ + .priority = 2 /* Prefer AVX512 over priority 1 (SSE2 and others) */ }; #endif diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile index a4c7cd74cff5..4fb7700a741b 100644 --- a/lib/raid6/test/Makefile +++ b/lib/raid6/test/Makefile @@ -4,6 +4,8 @@ # from userspace. # +pound := \# + CC = gcc OPTFLAGS = -O2 # Adjust as desired CFLAGS = -I.. 
-I ../../../include -g $(OPTFLAGS) @@ -42,7 +44,7 @@ else ifeq ($(HAS_NEON),yes) OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1 else - HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\ + HAS_ALTIVEC := $(shell printf '$(pound)include <altivec.h>\nvector int a;\n' |\ gcc -c -x c - >/dev/null && rm ./-.o && echo yes) ifeq ($(HAS_ALTIVEC),yes) CFLAGS += -I../../../arch/powerpc/include diff --git a/lib/raid6/test/test.c b/lib/raid6/test/test.c index a3cf071941ab..841a55242aba 100644 --- a/lib/raid6/test/test.c +++ b/lib/raid6/test/test.c @@ -19,7 +19,6 @@ #define NDISKS 16 /* Including P and Q */ const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE))); -struct raid6_calls raid6_call; char *dataptrs[NDISKS]; char data[NDISKS][PAGE_SIZE] __attribute__((aligned(PAGE_SIZE))); diff --git a/lib/raid6/vpermxor.uc b/lib/raid6/vpermxor.uc index 10475dc423c1..1bfb127fbfe8 100644 --- a/lib/raid6/vpermxor.uc +++ b/lib/raid6/vpermxor.uc @@ -24,9 +24,9 @@ #ifdef CONFIG_ALTIVEC #include <altivec.h> +#include <asm/ppc-opcode.h> #ifdef __KERNEL__ #include <asm/cputable.h> -#include <asm/ppc-opcode.h> #include <asm/switch_to.h> #endif diff --git a/lib/random32.c b/lib/random32.c index a57a0e18819d..976632003ec6 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -41,7 +41,6 @@ #include <linux/bitops.h> #include <linux/slab.h> #include <asm/unaligned.h> -#include <trace/events/random.h> /** * prandom_u32_state - seeded pseudo-random number generator. @@ -387,7 +386,6 @@ u32 prandom_u32(void) struct siprand_state *state = get_cpu_ptr(&net_rand_state); u32 res = siprand_u32(state); - trace_prandom_u32(res); put_cpu_ptr(&net_rand_state); return res; } @@ -553,9 +551,11 @@ static void prandom_reseed(struct timer_list *unused) * To avoid worrying about whether it's safe to delay that interrupt * long enough to seed all CPUs, just schedule an immediate timer event. 
*/ -static void prandom_timer_start(struct random_ready_callback *unused) +static int prandom_timer_start(struct notifier_block *nb, + unsigned long action, void *data) { mod_timer(&seed_timer, jiffies); + return 0; } #ifdef CONFIG_RANDOM32_SELFTEST @@ -619,13 +619,13 @@ core_initcall(prandom32_state_selftest); */ static int __init prandom_init_late(void) { - static struct random_ready_callback random_ready = { - .func = prandom_timer_start + static struct notifier_block random_ready = { + .notifier_call = prandom_timer_start }; - int ret = add_random_ready_callback(&random_ready); + int ret = register_random_ready_notifier(&random_ready); if (ret == -EALREADY) { - prandom_timer_start(&random_ready); + prandom_timer_start(&random_ready, 0, NULL); ret = 0; } return ret; diff --git a/lib/ref_tracker.c b/lib/ref_tracker.c index 0ae2e66dcf0f..a6789c0c626b 100644 --- a/lib/ref_tracker.c +++ b/lib/ref_tracker.c @@ -69,9 +69,12 @@ int ref_tracker_alloc(struct ref_tracker_dir *dir, unsigned long entries[REF_TRACKER_STACK_ENTRIES]; struct ref_tracker *tracker; unsigned int nr_entries; + gfp_t gfp_mask = gfp; unsigned long flags; - *trackerp = tracker = kzalloc(sizeof(*tracker), gfp | __GFP_NOFAIL); + if (gfp & __GFP_DIRECT_RECLAIM) + gfp_mask |= __GFP_NOFAIL; + *trackerp = tracker = kzalloc(sizeof(*tracker), gfp_mask); if (unlikely(!tracker)) { pr_err_once("memory allocation failure, unreliable refcount tracker.\n"); refcount_inc(&dir->untracked); diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 2709ab825499..2eb3de18ded3 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -85,7 +85,6 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, bool alloc_hint) { unsigned int bits_per_word; - unsigned int i; if (shift < 0) shift = sbitmap_calculate_shift(depth); @@ -117,10 +116,6 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, return -ENOMEM; } - for (i = 0; i < sb->map_nr; i++) { - sb->map[i].depth = min(depth, bits_per_word); - depth -= sb->map[i].depth; - } return 0; } EXPORT_SYMBOL_GPL(sbitmap_init_node); @@ -135,11 +130,6 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth) sb->depth = depth; sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word); - - for (i = 0; i < sb->map_nr; i++) { - sb->map[i].depth = min(depth, bits_per_word); - depth -= sb->map[i].depth; - } } EXPORT_SYMBOL_GPL(sbitmap_resize); @@ -184,8 +174,8 @@ static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index, int nr; do { - nr = __sbitmap_get_word(&map->word, map->depth, alloc_hint, - !sb->round_robin); + nr = __sbitmap_get_word(&map->word, __map_depth(sb, index), + alloc_hint, !sb->round_robin); if (nr != -1) break; if (!sbitmap_deferred_clear(map)) @@ -257,7 +247,9 @@ static int __sbitmap_get_shallow(struct sbitmap *sb, for (i = 0; i < sb->map_nr; i++) { again: nr = __sbitmap_get_word(&sb->map[index].word, - min(sb->map[index].depth, shallow_depth), + min_t(unsigned int, + __map_depth(sb, index), + shallow_depth), SB_NR_TO_BIT(sb, alloc_hint), true); if (nr != -1) { nr += index << sb->shift; @@ -315,11 +307,12 @@ static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set) for (i = 0; i < sb->map_nr; i++) { const struct sbitmap_word *word = &sb->map[i]; + unsigned int word_depth = __map_depth(sb, i); if (set) - weight += bitmap_weight(&word->word, word->depth); + weight += bitmap_weight(&word->word, word_depth); else - weight += bitmap_weight(&word->cleared, word->depth); + weight += bitmap_weight(&word->cleared, word_depth); } return weight; } @@ -367,7 
+360,7 @@ void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m) for (i = 0; i < sb->map_nr; i++) { unsigned long word = READ_ONCE(sb->map[i].word); unsigned long cleared = READ_ONCE(sb->map[i].cleared); - unsigned int word_bits = READ_ONCE(sb->map[i].depth); + unsigned int word_bits = __map_depth(sb, i); word &= ~cleared; @@ -457,10 +450,9 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth, } EXPORT_SYMBOL_GPL(sbitmap_queue_init_node); -static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq, - unsigned int depth) +static inline void __sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq, + unsigned int wake_batch) { - unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth); int i; if (sbq->wake_batch != wake_batch) { @@ -476,6 +468,30 @@ static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq, } } +static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq, + unsigned int depth) +{ + unsigned int wake_batch; + + wake_batch = sbq_calc_wake_batch(sbq, depth); + __sbitmap_queue_update_wake_batch(sbq, wake_batch); +} + +void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq, + unsigned int users) +{ + unsigned int wake_batch; + unsigned int min_batch; + unsigned int depth = (sbq->sb.depth + users - 1) / users; + + min_batch = sbq->sb.depth >= (4 * SBQ_WAIT_QUEUES) ? 4 : 1; + + wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES, + min_batch, SBQ_WAKE_BATCH); + __sbitmap_queue_update_wake_batch(sbq, wake_batch); +} +EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch); + void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth) { sbitmap_queue_update_wake_batch(sbq, depth); @@ -508,15 +524,16 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, for (i = 0; i < sb->map_nr; i++) { struct sbitmap_word *map = &sb->map[index]; unsigned long get_mask; + unsigned int map_depth = __map_depth(sb, index); sbitmap_deferred_clear(map); - if (map->word == (1UL << (map->depth - 1)) - 1) + if (map->word == (1UL << (map_depth - 1)) - 1) continue; - nr = find_first_zero_bit(&map->word, map->depth); - if (nr + nr_tags <= map->depth) { + nr = find_first_zero_bit(&map->word, map_depth); + if (nr + nr_tags <= map_depth) { atomic_long_t *ptr = (atomic_long_t *) &map->word; - int map_tags = min_t(int, nr_tags, map->depth); + int map_tags = min_t(int, nr_tags, map_depth); unsigned long val, ret; get_mask = ((1UL << map_tags) - 1) << nr; @@ -540,14 +557,14 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, return 0; } -int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq, - unsigned int shallow_depth) +int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq, + unsigned int shallow_depth) { WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth); return sbitmap_get_shallow(&sbq->sb, shallow_depth); } -EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow); +EXPORT_SYMBOL_GPL(sbitmap_queue_get_shallow); void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq, unsigned int min_shallow_depth) diff --git a/lib/sha1.c b/lib/sha1.c index 9bd1935a1472..0494766fc574 100644 --- a/lib/sha1.c +++ b/lib/sha1.c @@ -9,6 +9,7 @@ #include <linux/kernel.h> #include <linux/export.h> #include <linux/bitops.h> +#include <linux/string.h> #include <crypto/sha1.h> #include <asm/unaligned.h> @@ -55,7 +56,8 @@ #define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \ __u32 TEMP = input(t); setW(t, TEMP); \ E += TEMP + rol32(A,5) + (fn) + (constant); \ - B = ror32(B, 
2); } while (0) + B = ror32(B, 2); \ + TEMP = E; E = D; D = C; C = B; B = A; A = TEMP; } while (0) #define T_0_15(t, A, B, C, D, E) SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E ) #define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E ) @@ -84,6 +86,7 @@ void sha1_transform(__u32 *digest, const char *data, __u32 *array) { __u32 A, B, C, D, E; + unsigned int i = 0; A = digest[0]; B = digest[1]; @@ -92,94 +95,24 @@ void sha1_transform(__u32 *digest, const char *data, __u32 *array) E = digest[4]; /* Round 1 - iterations 0-16 take their input from 'data' */ - T_0_15( 0, A, B, C, D, E); - T_0_15( 1, E, A, B, C, D); - T_0_15( 2, D, E, A, B, C); - T_0_15( 3, C, D, E, A, B); - T_0_15( 4, B, C, D, E, A); - T_0_15( 5, A, B, C, D, E); - T_0_15( 6, E, A, B, C, D); - T_0_15( 7, D, E, A, B, C); - T_0_15( 8, C, D, E, A, B); - T_0_15( 9, B, C, D, E, A); - T_0_15(10, A, B, C, D, E); - T_0_15(11, E, A, B, C, D); - T_0_15(12, D, E, A, B, C); - T_0_15(13, C, D, E, A, B); - T_0_15(14, B, C, D, E, A); - T_0_15(15, A, B, C, D, E); + for (; i < 16; ++i) + T_0_15(i, A, B, C, D, E); /* Round 1 - tail. Input from 512-bit mixing array */ - T_16_19(16, E, A, B, C, D); - T_16_19(17, D, E, A, B, C); - T_16_19(18, C, D, E, A, B); - T_16_19(19, B, C, D, E, A); + for (; i < 20; ++i) + T_16_19(i, A, B, C, D, E); /* Round 2 */ - T_20_39(20, A, B, C, D, E); - T_20_39(21, E, A, B, C, D); - T_20_39(22, D, E, A, B, C); - T_20_39(23, C, D, E, A, B); - T_20_39(24, B, C, D, E, A); - T_20_39(25, A, B, C, D, E); - T_20_39(26, E, A, B, C, D); - T_20_39(27, D, E, A, B, C); - T_20_39(28, C, D, E, A, B); - T_20_39(29, B, C, D, E, A); - T_20_39(30, A, B, C, D, E); - T_20_39(31, E, A, B, C, D); - T_20_39(32, D, E, A, B, C); - T_20_39(33, C, D, E, A, B); - T_20_39(34, B, C, D, E, A); - T_20_39(35, A, B, C, D, E); - T_20_39(36, E, A, B, C, D); - T_20_39(37, D, E, A, B, C); - T_20_39(38, C, D, E, A, B); - T_20_39(39, B, C, D, E, A); + for (; i < 40; ++i) + T_20_39(i, A, B, C, D, E); /* Round 3 */ - T_40_59(40, A, B, C, D, E); - T_40_59(41, E, A, B, C, D); - T_40_59(42, D, E, A, B, C); - T_40_59(43, C, D, E, A, B); - T_40_59(44, B, C, D, E, A); - T_40_59(45, A, B, C, D, E); - T_40_59(46, E, A, B, C, D); - T_40_59(47, D, E, A, B, C); - T_40_59(48, C, D, E, A, B); - T_40_59(49, B, C, D, E, A); - T_40_59(50, A, B, C, D, E); - T_40_59(51, E, A, B, C, D); - T_40_59(52, D, E, A, B, C); - T_40_59(53, C, D, E, A, B); - T_40_59(54, B, C, D, E, A); - T_40_59(55, A, B, C, D, E); - T_40_59(56, E, A, B, C, D); - T_40_59(57, D, E, A, B, C); - T_40_59(58, C, D, E, A, B); - T_40_59(59, B, C, D, E, A); + for (; i < 60; ++i) + T_40_59(i, A, B, C, D, E); /* Round 4 */ - T_60_79(60, A, B, C, D, E); - T_60_79(61, E, A, B, C, D); - T_60_79(62, D, E, A, B, C); - T_60_79(63, C, D, E, A, B); - T_60_79(64, B, C, D, E, A); - T_60_79(65, A, B, C, D, E); - T_60_79(66, E, A, B, C, D); - T_60_79(67, D, E, A, B, C); - T_60_79(68, C, D, E, A, B); - T_60_79(69, B, C, D, E, A); - T_60_79(70, A, B, C, D, E); - T_60_79(71, E, A, B, C, D); - T_60_79(72, D, E, A, B, C); - T_60_79(73, C, D, E, A, B); - T_60_79(74, B, C, D, E, A); - T_60_79(75, A, B, C, D, E); - T_60_79(76, E, A, B, C, D); - T_60_79(77, D, E, A, B, C); - T_60_79(78, C, D, E, A, B); - T_60_79(79, B, C, D, E, A); + for (; i < 80; ++i) + T_60_79(i, A, B, C, D, E); digest[0] += A; digest[1] += B; diff --git a/lib/stackdepot.c b/lib/stackdepot.c index b437ae79aca1..bf5ba9af0500 100644 --- a/lib/stackdepot.c +++ b/lib/stackdepot.c @@ -23,6 +23,7 @@ #include 
<linux/jhash.h> #include <linux/kernel.h> #include <linux/mm.h> +#include <linux/mutex.h> #include <linux/percpu.h> #include <linux/printk.h> #include <linux/slab.h> @@ -161,18 +162,40 @@ static int __init is_stack_depot_disabled(char *str) } early_param("stack_depot_disable", is_stack_depot_disabled); -int __init stack_depot_init(void) +/* + * __ref because of memblock_alloc(), which will not be actually called after + * the __init code is gone, because at that point slab_is_available() is true + */ +__ref int stack_depot_init(void) { - if (!stack_depot_disable) { + static DEFINE_MUTEX(stack_depot_init_mutex); + + mutex_lock(&stack_depot_init_mutex); + if (!stack_depot_disable && !stack_table) { size_t size = (STACK_HASH_SIZE * sizeof(struct stack_record *)); int i; - stack_table = memblock_alloc(size, size); - for (i = 0; i < STACK_HASH_SIZE; i++) - stack_table[i] = NULL; + if (slab_is_available()) { + pr_info("Stack Depot allocating hash table with kvmalloc\n"); + stack_table = kvmalloc(size, GFP_KERNEL); + } else { + pr_info("Stack Depot allocating hash table with memblock_alloc\n"); + stack_table = memblock_alloc(size, SMP_CACHE_BYTES); + } + if (stack_table) { + for (i = 0; i < STACK_HASH_SIZE; i++) + stack_table[i] = NULL; + } else { + pr_err("Stack Depot hash table allocation failed, disabling\n"); + stack_depot_disable = true; + mutex_unlock(&stack_depot_init_mutex); + return -ENOMEM; + } } + mutex_unlock(&stack_depot_init_mutex); return 0; } +EXPORT_SYMBOL_GPL(stack_depot_init); /* Calculate hash for a stack */ static inline u32 hash_stack(unsigned long *entries, unsigned int size) @@ -305,6 +328,9 @@ EXPORT_SYMBOL_GPL(stack_depot_fetch); * (allocates using GFP flags of @alloc_flags). If @can_alloc is %false, avoids * any allocations and will fail if no space is left to store the stack trace. * + * If the stack trace in @entries is from an interrupt, only the portion up to + * interrupt entry is saved. + * * Context: Any context, but setting @can_alloc to %false is required if * alloc_pages() cannot be used from the current context. Currently * this is the case from contexts where neither %GFP_ATOMIC nor @@ -323,6 +349,16 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries, unsigned long flags; u32 hash; + /* + * If this stack trace is from an interrupt, including anything before + * interrupt entry usually leads to unbounded stackdepot growth. + * + * Because use of filter_irq_stacks() is a requirement to ensure + * stackdepot can efficiently deduplicate interrupt stacks, always + * filter_irq_stacks() to simplify all callers' use of stackdepot. + */ + nr_entries = filter_irq_stacks(entries, nr_entries); + if (unlikely(nr_entries == 0) || stack_depot_disable) goto fast_exit; diff --git a/lib/test_stackinit.c b/lib/stackinit_kunit.c index a3c74e6a21ff..35c69aa425b2 100644 --- a/lib/test_stackinit.c +++ b/lib/stackinit_kunit.c @@ -2,76 +2,21 @@ /* * Test cases for compiler-based stack variable zeroing via * -ftrivial-auto-var-init={zero,pattern} or CONFIG_GCC_PLUGIN_STRUCTLEAK*. 
+ * For example, see: + * https://www.kernel.org/doc/html/latest/dev-tools/kunit/kunit-tool.html#configuring-building-and-running-tests + * ./tools/testing/kunit/kunit.py run stackinit [--raw_output] \ + * --make_option LLVM=1 \ + * --kconfig_add CONFIG_INIT_STACK_ALL_ZERO=y * - * External build example: - * clang -O2 -Wall -ftrivial-auto-var-init=pattern \ - * -o test_stackinit test_stackinit.c */ -#ifdef __KERNEL__ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <kunit/test.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> -#else - -/* Userspace headers. */ -#include <stdio.h> -#include <stdint.h> -#include <string.h> -#include <stdbool.h> -#include <errno.h> -#include <sys/types.h> - -/* Linux kernel-ism stubs for stand-alone userspace build. */ -#define KBUILD_MODNAME "stackinit" -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__) -#define pr_warn(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__) -#define pr_info(fmt, ...) fprintf(stdout, pr_fmt(fmt), ##__VA_ARGS__) -#define __init /**/ -#define __exit /**/ -#define __user /**/ -#define noinline __attribute__((__noinline__)) -#define __aligned(x) __attribute__((__aligned__(x))) -#ifdef __clang__ -# define __compiletime_error(message) /**/ -#else -# define __compiletime_error(message) __attribute__((__error__(message))) -#endif -#define __compiletime_assert(condition, msg, prefix, suffix) \ - do { \ - extern void prefix ## suffix(void) __compiletime_error(msg); \ - if (!(condition)) \ - prefix ## suffix(); \ - } while (0) -#define _compiletime_assert(condition, msg, prefix, suffix) \ - __compiletime_assert(condition, msg, prefix, suffix) -#define compiletime_assert(condition, msg) \ - _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) -#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) -#define BUILD_BUG_ON(condition) \ - BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) -typedef uint8_t u8; -typedef uint16_t u16; -typedef uint32_t u32; -typedef uint64_t u64; - -#define module_init(func) static int (*do_init)(void) = func -#define module_exit(func) static void (*do_exit)(void) = func -#define MODULE_LICENSE(str) int main(void) { \ - int rc; \ - /* License: str */ \ - rc = do_init(); \ - if (rc == 0) \ - do_exit(); \ - return rc; \ - } - -#endif /* __KERNEL__ */ - /* Exfiltration buffer. */ #define MAX_VAR_SIZE 128 static u8 check_buf[MAX_VAR_SIZE]; @@ -201,7 +146,7 @@ static bool range_contains(char *haystack_start, size_t haystack_size, */ #define DEFINE_TEST_DRIVER(name, var_type, which, xfail) \ /* Returns 0 on success, 1 on failure. */ \ -static noinline __init int test_ ## name (void) \ +static noinline void test_ ## name (struct kunit *test) \ { \ var_type zero INIT_CLONE_ ## which; \ int ignored; \ @@ -220,10 +165,8 @@ static noinline __init int test_ ## name (void) \ /* Verify all bytes overwritten with 0xFF. */ \ for (sum = 0, i = 0; i < target_size; i++) \ sum += (check_buf[i] != 0xFF); \ - if (sum) { \ - pr_err(#name ": leaf fill was not 0xFF!?\n"); \ - return 1; \ - } \ + KUNIT_ASSERT_EQ_MSG(test, sum, 0, \ + "leaf fill was not 0xFF!?\n"); \ /* Clear entire check buffer for later bit tests. */ \ memset(check_buf, 0x00, sizeof(check_buf)); \ /* Extract stack-defined variable contents. */ \ @@ -231,32 +174,29 @@ static noinline __init int test_ ## name (void) \ FETCH_ARG_ ## which(zero)); \ \ /* Validate that compiler lined up fill and target. 
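 *
 * Conceptually, the containment check below amounts to (sketch,
 * inferred from range_contains()'s haystack/needle signature):
 *
 *	ok = target_start >= fill_start &&
 *	     target_start + target_size <= fill_start + fill_size;
 *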
*/ \ - if (!range_contains(fill_start, fill_size, \ - target_start, target_size)) { \ - pr_err(#name ": stack fill missed target!?\n"); \ - pr_err(#name ": fill %zu wide\n", fill_size); \ - pr_err(#name ": target offset by %d\n", \ - (int)((ssize_t)(uintptr_t)fill_start - \ - (ssize_t)(uintptr_t)target_start)); \ - return 1; \ - } \ + KUNIT_ASSERT_TRUE_MSG(test, \ + range_contains(fill_start, fill_size, \ + target_start, target_size), \ + "stack fill missed target!? " \ + "(fill %zu wide, target offset by %d)\n", \ + fill_size, \ + (int)((ssize_t)(uintptr_t)fill_start - \ + (ssize_t)(uintptr_t)target_start)); \ \ /* Look for any bytes still 0xFF in check region. */ \ for (sum = 0, i = 0; i < target_size; i++) \ sum += (check_buf[i] == 0xFF); \ \ - if (sum == 0) { \ - pr_info(#name " ok\n"); \ - return 0; \ - } else { \ - pr_warn(#name " %sFAIL (uninit bytes: %d)\n", \ - (xfail) ? "X" : "", sum); \ - return (xfail) ? 0 : 1; \ - } \ + if (sum != 0 && xfail) \ + kunit_skip(test, \ + "XFAIL uninit bytes: %d\n", \ + sum); \ + KUNIT_ASSERT_EQ_MSG(test, sum, 0, \ + "uninit bytes: %d\n", sum); \ } #define DEFINE_TEST(name, var_type, which, init_level, xfail) \ /* no-op to force compiler into ignoring "uninitialized" vars */\ -static noinline __init DO_NOTHING_TYPE_ ## which(var_type) \ +static noinline DO_NOTHING_TYPE_ ## which(var_type) \ do_nothing_ ## name(var_type *ptr) \ { \ /* Will always be true, but compiler doesn't know. */ \ @@ -265,9 +205,8 @@ do_nothing_ ## name(var_type *ptr) \ else \ return DO_NOTHING_RETURN_ ## which(ptr + 1); \ } \ -static noinline __init int leaf_ ## name(unsigned long sp, \ - bool fill, \ - var_type *arg) \ +static noinline int leaf_ ## name(unsigned long sp, bool fill, \ + var_type *arg) \ { \ char buf[VAR_BUFFER]; \ var_type var \ @@ -341,6 +280,27 @@ struct test_user { unsigned long four; }; +#define ALWAYS_PASS WANT_SUCCESS +#define ALWAYS_FAIL XFAIL + +#ifdef CONFIG_INIT_STACK_NONE +# define USER_PASS XFAIL +# define BYREF_PASS XFAIL +# define STRONG_PASS XFAIL +#elif defined(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER) +# define USER_PASS WANT_SUCCESS +# define BYREF_PASS XFAIL +# define STRONG_PASS XFAIL +#elif defined(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF) +# define USER_PASS WANT_SUCCESS +# define BYREF_PASS WANT_SUCCESS +# define STRONG_PASS XFAIL +#else +# define USER_PASS WANT_SUCCESS +# define BYREF_PASS WANT_SUCCESS +# define STRONG_PASS WANT_SUCCESS +#endif + #define DEFINE_SCALAR_TEST(name, init, xfail) \ DEFINE_TEST(name ## _ ## init, name, SCALAR, \ init, xfail) @@ -364,27 +324,26 @@ struct test_user { DEFINE_STRUCT_TEST(trailing_hole, init, xfail); \ DEFINE_STRUCT_TEST(packed, init, xfail) -#define DEFINE_STRUCT_INITIALIZER_TESTS(base) \ +#define DEFINE_STRUCT_INITIALIZER_TESTS(base, xfail) \ DEFINE_STRUCT_TESTS(base ## _ ## partial, \ - WANT_SUCCESS); \ - DEFINE_STRUCT_TESTS(base ## _ ## all, \ - WANT_SUCCESS) + xfail); \ + DEFINE_STRUCT_TESTS(base ## _ ## all, xfail) /* These should be fully initialized all the time! */ -DEFINE_SCALAR_TESTS(zero, WANT_SUCCESS); -DEFINE_STRUCT_TESTS(zero, WANT_SUCCESS); +DEFINE_SCALAR_TESTS(zero, ALWAYS_PASS); +DEFINE_STRUCT_TESTS(zero, ALWAYS_PASS); /* Struct initializers: padding may be left uninitialized. 
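 *
 * The hazard, sketched on a hypothetical struct:
 *
 *	struct s { char c; int i; };	// 3 padding bytes after 'c'
 *	struct s v = { .c = 1 };	// 'i' must be zero-initialized,
 *					// but the padding bytes may
 *					// still hold old stack data
 *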
*/ -DEFINE_STRUCT_INITIALIZER_TESTS(static); -DEFINE_STRUCT_INITIALIZER_TESTS(dynamic); -DEFINE_STRUCT_INITIALIZER_TESTS(runtime); -DEFINE_STRUCT_INITIALIZER_TESTS(assigned_static); -DEFINE_STRUCT_INITIALIZER_TESTS(assigned_dynamic); -DEFINE_STRUCT_TESTS(assigned_copy, XFAIL); +DEFINE_STRUCT_INITIALIZER_TESTS(static, STRONG_PASS); +DEFINE_STRUCT_INITIALIZER_TESTS(dynamic, STRONG_PASS); +DEFINE_STRUCT_INITIALIZER_TESTS(runtime, STRONG_PASS); +DEFINE_STRUCT_INITIALIZER_TESTS(assigned_static, STRONG_PASS); +DEFINE_STRUCT_INITIALIZER_TESTS(assigned_dynamic, STRONG_PASS); +DEFINE_STRUCT_TESTS(assigned_copy, ALWAYS_FAIL); /* No initialization without compiler instrumentation. */ -DEFINE_SCALAR_TESTS(none, WANT_SUCCESS); -DEFINE_STRUCT_TESTS(none, WANT_SUCCESS); +DEFINE_SCALAR_TESTS(none, STRONG_PASS); +DEFINE_STRUCT_TESTS(none, BYREF_PASS); /* Initialization of members with __user attribute. */ -DEFINE_TEST(user, struct test_user, STRUCT, none, WANT_SUCCESS); +DEFINE_TEST(user, struct test_user, STRUCT, none, USER_PASS); /* * Check two uses through a variable declaration outside either path, @@ -398,7 +357,7 @@ static int noinline __leaf_switch_none(int path, bool fill) * This is intentionally unreachable. To silence the * warning, build with -Wno-switch-unreachable */ - uint64_t var; + uint64_t var[10]; case 1: target_start = &var; @@ -423,19 +382,19 @@ static int noinline __leaf_switch_none(int path, bool fill) memcpy(check_buf, target_start, target_size); break; default: - var = 5; - return var & forced_mask; + var[1] = 5; + return var[1] & forced_mask; } return 0; } -static noinline __init int leaf_switch_1_none(unsigned long sp, bool fill, +static noinline int leaf_switch_1_none(unsigned long sp, bool fill, uint64_t *arg) { return __leaf_switch_none(1, fill); } -static noinline __init int leaf_switch_2_none(unsigned long sp, bool fill, +static noinline int leaf_switch_2_none(unsigned long sp, bool fill, uint64_t *arg) { return __leaf_switch_none(2, fill); @@ -447,68 +406,56 @@ static noinline __init int leaf_switch_2_none(unsigned long sp, bool fill, * non-code areas (i.e. in a switch statement before the first "case"). * https://bugs.llvm.org/show_bug.cgi?id=44916 */ -DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, XFAIL); -DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, XFAIL); - -static int __init test_stackinit_init(void) -{ - unsigned int failures = 0; - -#define test_scalars(init) do { \ - failures += test_u8_ ## init (); \ - failures += test_u16_ ## init (); \ - failures += test_u32_ ## init (); \ - failures += test_u64_ ## init (); \ - failures += test_char_array_ ## init (); \ - } while (0) - -#define test_structs(init) do { \ - failures += test_small_hole_ ## init (); \ - failures += test_big_hole_ ## init (); \ - failures += test_trailing_hole_ ## init (); \ - failures += test_packed_ ## init (); \ - } while (0) - +DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, ALWAYS_FAIL); +DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, ALWAYS_FAIL); + +#define KUNIT_test_scalars(init) \ + KUNIT_CASE(test_u8_ ## init), \ + KUNIT_CASE(test_u16_ ## init), \ + KUNIT_CASE(test_u32_ ## init), \ + KUNIT_CASE(test_u64_ ## init), \ + KUNIT_CASE(test_char_array_ ## init) + +#define KUNIT_test_structs(init) \ + KUNIT_CASE(test_small_hole_ ## init), \ + KUNIT_CASE(test_big_hole_ ## init), \ + KUNIT_CASE(test_trailing_hole_ ## init),\ + KUNIT_CASE(test_packed_ ## init) \ + +static struct kunit_case stackinit_test_cases[] = { /* These are explicitly initialized and should always pass. 
 	/* These are explicitly initialized and should always pass. */
-	test_scalars(zero);
-	test_structs(zero);
+	KUNIT_test_scalars(zero),
+	KUNIT_test_structs(zero),
 	/* Padding here appears to be accidentally always initialized? */
-	test_structs(dynamic_partial);
-	test_structs(assigned_dynamic_partial);
+	KUNIT_test_structs(dynamic_partial),
+	KUNIT_test_structs(assigned_dynamic_partial),
 	/* Padding initialization depends on compiler behaviors. */
-	test_structs(static_partial);
-	test_structs(static_all);
-	test_structs(dynamic_all);
-	test_structs(runtime_partial);
-	test_structs(runtime_all);
-	test_structs(assigned_static_partial);
-	test_structs(assigned_static_all);
-	test_structs(assigned_dynamic_all);
+	KUNIT_test_structs(static_partial),
+	KUNIT_test_structs(static_all),
+	KUNIT_test_structs(dynamic_all),
+	KUNIT_test_structs(runtime_partial),
+	KUNIT_test_structs(runtime_all),
+	KUNIT_test_structs(assigned_static_partial),
+	KUNIT_test_structs(assigned_static_all),
+	KUNIT_test_structs(assigned_dynamic_all),
 	/* Everything fails this since it effectively performs a memcpy(). */
-	test_structs(assigned_copy);
-
+	KUNIT_test_structs(assigned_copy),
 	/* STRUCTLEAK_BYREF_ALL should cover everything from here down. */
-	test_scalars(none);
-	failures += test_switch_1_none();
-	failures += test_switch_2_none();
-
+	KUNIT_test_scalars(none),
+	KUNIT_CASE(test_switch_1_none),
+	KUNIT_CASE(test_switch_2_none),
 	/* STRUCTLEAK_BYREF should cover from here down. */
-	test_structs(none);
-
+	KUNIT_test_structs(none),
 	/* STRUCTLEAK will only cover this. */
-	failures += test_user();
-
-	if (failures == 0)
-		pr_info("all tests passed!\n");
-	else
-		pr_err("failures: %u\n", failures);
+	KUNIT_CASE(test_user),
+	{}
+};
 
-	return failures ? -EINVAL : 0;
-}
-module_init(test_stackinit_init);
+static struct kunit_suite stackinit_test_suite = {
+	.name = "stackinit",
+	.test_cases = stackinit_test_cases,
+};
 
-static void __exit test_stackinit_exit(void)
-{ }
-module_exit(test_stackinit_exit);
+kunit_test_suites(&stackinit_test_suite);
 
 MODULE_LICENSE("GPL");
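Several test modules in this series are converted the same way; for reference, the minimal KUnit shape that test_stackinit.c now follows is sketched below. The names are invented for illustration and are not part of the patch; the APIs (KUNIT_CASE, kunit_skip(), kunit_test_suites()) are the ones used in the conversion above.

#include <kunit/test.h>

/* Every case receives a struct kunit context instead of returning 0/1. */
static void example_math_test(struct kunit *test)
{
	KUNIT_EXPECT_EQ(test, 4, 2 + 2);
}

/* Expected failures are mapped to skips, mirroring the XFAIL handling. */
static void example_xfail_test(struct kunit *test)
{
	if (IS_ENABLED(CONFIG_INIT_STACK_NONE))
		kunit_skip(test, "XFAIL without stack initialization");
	KUNIT_EXPECT_EQ(test, 0, 0);
}

static struct kunit_case example_test_cases[] = {
	KUNIT_CASE(example_math_test),
	KUNIT_CASE(example_xfail_test),
	{}
};

static struct kunit_suite example_test_suite = {
	.name = "example",
	.test_cases = example_test_cases,
};
kunit_test_suites(&example_test_suite);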
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index d5d008f5b1d9..90f9f1b7afec 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -10,6 +10,7 @@
 #include <linux/math64.h>
 #include <linux/export.h>
 #include <linux/ctype.h>
+#include <linux/device.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
 #include <linux/limits.h>
@@ -675,6 +676,39 @@ char *kstrdup_quotable_file(struct file *file, gfp_t gfp)
 EXPORT_SYMBOL_GPL(kstrdup_quotable_file);
 
 /**
+ * kasprintf_strarray - allocate and fill array of sequential strings
+ * @gfp: flags for the slab allocator
+ * @prefix: prefix to be used
+ * @n: number of strings to be allocated and filled
+ *
+ * Allocates and fills @n strings using pattern "%s-%zu", where the prefix
+ * is provided by the caller. The caller is responsible for freeing them
+ * with kfree_strarray() after use.
+ *
+ * Returns an array of strings or NULL when memory can't be allocated.
+ */
+char **kasprintf_strarray(gfp_t gfp, const char *prefix, size_t n)
+{
+	char **names;
+	size_t i;
+
+	names = kcalloc(n + 1, sizeof(char *), gfp);
+	if (!names)
+		return NULL;
+
+	for (i = 0; i < n; i++) {
+		names[i] = kasprintf(gfp, "%s-%zu", prefix, i);
+		if (!names[i]) {
+			kfree_strarray(names, i);
+			return NULL;
+		}
+	}
+
+	return names;
+}
+EXPORT_SYMBOL_GPL(kasprintf_strarray);
+
+/**
  * kfree_strarray - free a number of dynamically allocated strings contained
  *		    in an array and the array itself
  *
@@ -697,6 +731,39 @@ void kfree_strarray(char **array, size_t n)
 }
 EXPORT_SYMBOL_GPL(kfree_strarray);
 
+struct strarray {
+	char **array;
+	size_t n;
+};
+
+static void devm_kfree_strarray(struct device *dev, void *res)
+{
+	struct strarray *array = res;
+
+	kfree_strarray(array->array, array->n);
+}
+
+char **devm_kasprintf_strarray(struct device *dev, const char *prefix, size_t n)
+{
+	struct strarray *ptr;
+
+	ptr = devres_alloc(devm_kfree_strarray, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+
+	ptr->array = kasprintf_strarray(GFP_KERNEL, prefix, n);
+	if (!ptr->array) {
+		devres_free(ptr);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* Record the count and register the release action, otherwise
+	 * devm_kfree_strarray() would never run (and would read an
+	 * uninitialized ->n if it did).
+	 */
+	ptr->n = n;
+	devres_add(dev, ptr);
+
+	return ptr->array;
+}
+EXPORT_SYMBOL_GPL(devm_kasprintf_strarray);
+
 /**
  * strscpy_pad() - Copy a C-string into a sized buffer
  * @dest: Where to copy the string to
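As a usage illustration for the two helpers just added (the probe function and the "clk" prefix below are invented for the example):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/string_helpers.h>

/* Hypothetical consumer: builds "clk-0" .. "clk-3"; the array is freed
 * automatically through devres when the device is unbound.
 */
static int example_probe(struct platform_device *pdev)
{
	char **names;

	names = devm_kasprintf_strarray(&pdev->dev, "clk", 4);
	if (IS_ERR(names))
		return PTR_ERR(names);

	dev_info(&pdev->dev, "first name: %s\n", names[0]);
	return 0;
}

The non-managed variant pairs kasprintf_strarray(GFP_KERNEL, "clk", 4) with an explicit kfree_strarray(names, 4).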
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index d33fa5a61b95..0c82f07f74fc 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -446,6 +446,42 @@ static void __init test_bitmap_parselist(void)
 	}
 }
 
+static void __init test_bitmap_printlist(void)
+{
+	unsigned long *bmap = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	char expected[256];
+	int ret, slen;
+	ktime_t time;
+
+	if (!buf || !bmap)
+		goto out;
+
+	memset(bmap, -1, PAGE_SIZE);
+	slen = snprintf(expected, 256, "0-%ld", PAGE_SIZE * 8 - 1);
+	if (slen < 0)
+		goto out;
+
+	time = ktime_get();
+	ret = bitmap_print_to_pagebuf(true, buf, bmap, PAGE_SIZE * 8);
+	time = ktime_get() - time;
+
+	if (ret != slen + 1) {
+		pr_err("bitmap_print_to_pagebuf: result is %d, expected %d\n", ret, slen);
+		goto out;
+	}
+
+	if (strncmp(buf, expected, slen)) {
+		pr_err("bitmap_print_to_pagebuf: result is %s, expected %s\n", buf, expected);
+		goto out;
+	}
+
+	pr_err("bitmap_print_to_pagebuf: input is '%s', Time: %llu\n", buf, time);
+out:
+	kfree(buf);
+	kfree(bmap);
+}
+
 static const unsigned long parse_test[] __initconst = {
 	BITMAP_FROM_U64(0),
 	BITMAP_FROM_U64(1),
@@ -818,6 +854,7 @@ static void __init selftest(void)
 	test_bitmap_arr32();
 	test_bitmap_parse();
 	test_bitmap_parselist();
+	test_bitmap_printlist();
 	test_mem_optimisations();
 	test_for_each_set_clump8();
 	test_bitmap_cut();
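bitmap_print_to_pagebuf() counts the trailing newline it emits, which is why the new test compares ret against slen + 1. A small illustrative sketch (invented names; a page-sized buffer is used because the helper is written for one):

#include <linux/bitmap.h>
#include <linux/gfp.h>

static void example_print_ranges(void)
{
	DECLARE_BITMAP(map, 16);
	char *buf = (char *)get_zeroed_page(GFP_KERNEL);
	int len;

	if (!buf)
		return;

	bitmap_zero(map, 16);
	bitmap_set(map, 0, 4);		/* bits 0..3 */
	bitmap_set(map, 8, 2);		/* bits 8..9 */

	/* List format ("true") prints ranges plus a trailing newline. */
	len = bitmap_print_to_pagebuf(true, buf, map, 16);
	/* buf == "0-3,8-9\n", len == 8: the newline is counted. */

	free_page((unsigned long)buf);
}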
diff --git a/lib/test_hash.c b/lib/test_hash.c
index 0ee40b4a56dd..bb25fda34794 100644
--- a/lib/test_hash.c
+++ b/lib/test_hash.c
@@ -14,17 +14,15 @@
  * and hash_64().
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt "\n"
-
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <linux/module.h>
 #include <linux/hash.h>
 #include <linux/stringhash.h>
-#include <linux/printk.h>
+#include <kunit/test.h>
 
 /* 32-bit XORSHIFT generator.  Seed must not be zero. */
-static u32 __init __attribute_const__
+static u32 __attribute_const__
 xorshift(u32 seed)
 {
 	seed ^= seed << 13;
@@ -34,7 +32,7 @@ xorshift(u32 seed)
 }
 
 /* Given a non-zero x, returns a non-zero byte. */
-static u8 __init __attribute_const__
+static u8 __attribute_const__
 mod255(u32 x)
 {
 	x = (x & 0xffff) + (x >> 16);	/* 1 <= x <= 0x1fffe */
@@ -45,8 +43,7 @@ mod255(u32 x)
 }
 
 /* Fill the buffer with non-zero bytes. */
-static void __init
-fill_buf(char *buf, size_t len, u32 seed)
+static void fill_buf(char *buf, size_t len, u32 seed)
 {
 	size_t i;
 
@@ -56,6 +53,50 @@ fill_buf(char *buf, size_t len, u32 seed)
 	}
 }
 
+/* Holds most testing variables for the int test. */
+struct test_hash_params {
+	/* Pointer to integer to be hashed. */
+	unsigned long long *h64;
+	/* Low 32-bits of integer to be hashed. */
+	u32 h0;
+	/* Arch-specific hash result. */
+	u32 h1;
+	/* Generic hash result. */
+	u32 h2;
+	/* ORed hashes of given size (in bits). */
+	u32 (*hash_or)[33];
+};
+
+#ifdef HAVE_ARCH__HASH_32
+static void
+test_int__hash_32(struct kunit *test, struct test_hash_params *params)
+{
+	params->hash_or[1][0] |= params->h2 = __hash_32_generic(params->h0);
+#if HAVE_ARCH__HASH_32 == 1
+	KUNIT_EXPECT_EQ_MSG(test, params->h1, params->h2,
+			    "__hash_32(%#x) = %#x != __hash_32_generic() = %#x",
+			    params->h0, params->h1, params->h2);
+#endif
+}
+#endif
+
+#ifdef HAVE_ARCH_HASH_64
+static void
+test_int_hash_64(struct kunit *test, struct test_hash_params *params, u32 const *m, int *k)
+{
+	params->h2 = hash_64_generic(*params->h64, *k);
+#if HAVE_ARCH_HASH_64 == 1
+	KUNIT_EXPECT_EQ_MSG(test, params->h1, params->h2,
+			    "hash_64(%#llx, %d) = %#x != hash_64_generic() = %#x",
+			    *params->h64, *k, params->h1, params->h2);
+#else
+	KUNIT_EXPECT_LE_MSG(test, params->h1, params->h2,
+			    "hash_64_generic(%#llx, %d) = %#x > %#x",
+			    *params->h64, *k, params->h1, *m);
+#endif
+}
+#endif
+
 /*
  * Test the various integer hash functions.  h64 (or its low-order bits)
  * is the integer to hash.  hash_or accumulates the OR of the hash values,
@@ -65,23 +106,16 @@ fill_buf(char *buf, size_t len, u32 seed)
  * inline, the code being tested is actually in the module, and you can
  * recompile and re-test the module without rebooting.
  */
-static bool __init
-test_int_hash(unsigned long long h64, u32 hash_or[2][33])
+static void
+test_int_hash(struct kunit *test, unsigned long long h64, u32 hash_or[2][33])
 {
 	int k;
-	u32 h0 = (u32)h64, h1, h2;
+	struct test_hash_params params = { &h64, (u32)h64, 0, 0, hash_or };
 
 	/* Test __hash32 */
-	hash_or[0][0] |= h1 = __hash_32(h0);
+	hash_or[0][0] |= params.h1 = __hash_32(params.h0);
 #ifdef HAVE_ARCH__HASH_32
-	hash_or[1][0] |= h2 = __hash_32_generic(h0);
-#if HAVE_ARCH__HASH_32 == 1
-	if (h1 != h2) {
-		pr_err("__hash_32(%#x) = %#x != __hash_32_generic() = %#x",
-			h0, h1, h2);
-		return false;
-	}
-#endif
+	test_int__hash_32(test, &params);
 #endif
 
 	/* Test k = 1..32 bits */
@@ -89,63 +123,53 @@ test_int_hash(unsigned long long h64, u32 hash_or[2][33])
 		u32 const m = ((u32)2 << (k-1)) - 1;	/* Low k bits set */
 
 		/* Test hash_32 */
-		hash_or[0][k] |= h1 = hash_32(h0, k);
-		if (h1 > m) {
-			pr_err("hash_32(%#x, %d) = %#x > %#x", h0, k, h1, m);
-			return false;
-		}
-#ifdef HAVE_ARCH_HASH_32
-		h2 = hash_32_generic(h0, k);
-#if HAVE_ARCH_HASH_32 == 1
-		if (h1 != h2) {
-			pr_err("hash_32(%#x, %d) = %#x != hash_32_generic() "
-				" = %#x", h0, k, h1, h2);
-			return false;
-		}
-#else
-		if (h2 > m) {
-			pr_err("hash_32_generic(%#x, %d) = %#x > %#x",
-				h0, k, h1, m);
-			return false;
-		}
-#endif
-#endif
+		hash_or[0][k] |= params.h1 = hash_32(params.h0, k);
+		KUNIT_EXPECT_LE_MSG(test, params.h1, m,
+				    "hash_32(%#x, %d) = %#x > %#x",
+				    params.h0, k, params.h1, m);
+
 		/* Test hash_64 */
-		hash_or[1][k] |= h1 = hash_64(h64, k);
-		if (h1 > m) {
-			pr_err("hash_64(%#llx, %d) = %#x > %#x", h64, k, h1, m);
-			return false;
-		}
+		hash_or[1][k] |= params.h1 = hash_64(h64, k);
+		KUNIT_EXPECT_LE_MSG(test, params.h1, m,
+				    "hash_64(%#llx, %d) = %#x > %#x",
+				    h64, k, params.h1, m);
 #ifdef HAVE_ARCH_HASH_64
-		h2 = hash_64_generic(h64, k);
-#if HAVE_ARCH_HASH_64 == 1
-		if (h1 != h2) {
-			pr_err("hash_64(%#llx, %d) = %#x != hash_64_generic() "
-				"= %#x", h64, k, h1, h2);
-			return false;
-		}
-#else
-		if (h2 > m) {
-			pr_err("hash_64_generic(%#llx, %d) = %#x > %#x",
-				h64, k, h1, m);
-			return false;
-		}
-#endif
+		test_int_hash_64(test, &params, &m, &k);
 #endif
 	}
-
-	(void)h2;	/* Suppress unused variable warning */
-	return true;
 }
 
 #define SIZE 256	/* Run time is cubic in SIZE */
 
-static int __init
-test_hash_init(void)
+static void test_string_or(struct kunit *test)
 {
 	char buf[SIZE+1];
-	u32 string_or = 0, hash_or[2][33] = { { 0, } };
-	unsigned tests = 0;
+	u32 string_or = 0;
+	int i, j;
+
+	fill_buf(buf, SIZE, 1);
+
+	/* Test every possible non-empty substring in the buffer. */
+	for (j = SIZE; j > 0; --j) {
+		buf[j] = '\0';
+
+		for (i = 0; i <= j; i++) {
+			u32 h0 = full_name_hash(buf+i, buf+i, j-i);
+
+			string_or |= h0;
+		} /* i */
+	} /* j */
+
+	/* The OR of all the hash values should cover all the bits */
+	KUNIT_EXPECT_EQ_MSG(test, string_or, -1u,
+			    "OR of all string hash results = %#x != %#x",
+			    string_or, -1u);
+}
+
+static void test_hash_or(struct kunit *test)
+{
+	char buf[SIZE+1];
+	u32 hash_or[2][33] = { { 0, } };
 	unsigned long long h64 = 0;
 	int i, j;
 
@@ -160,46 +184,27 @@ test_hash_init(void)
 			u32 h0 = full_name_hash(buf+i, buf+i, j-i);
 
 			/* Check that hashlen_string gets the length right */
-			if (hashlen_len(hashlen) != j-i) {
-				pr_err("hashlen_string(%d..%d) returned length"
-					" %u, expected %d",
-					i, j, hashlen_len(hashlen), j-i);
-				return -EINVAL;
-			}
+			KUNIT_EXPECT_EQ_MSG(test, hashlen_len(hashlen), j-i,
+					    "hashlen_string(%d..%d) returned length %u, expected %d",
+					    i, j, hashlen_len(hashlen), j-i);
 			/* Check that the hashes match */
-			if (hashlen_hash(hashlen) != h0) {
-				pr_err("hashlen_string(%d..%d) = %08x != "
-					"full_name_hash() = %08x",
-					i, j, hashlen_hash(hashlen), h0);
-				return -EINVAL;
-			}
+			KUNIT_EXPECT_EQ_MSG(test, hashlen_hash(hashlen), h0,
+					    "hashlen_string(%d..%d) = %08x != full_name_hash() = %08x",
+					    i, j, hashlen_hash(hashlen), h0);
 
-			string_or |= h0;
 			h64 = h64 << 32 | h0;	/* For use with hash_64 */
-			if (!test_int_hash(h64, hash_or))
-				return -EINVAL;
-			tests++;
+			test_int_hash(test, h64, hash_or);
 		} /* i */
 	} /* j */
 
-	/* The OR of all the hash values should cover all the bits */
-	if (~string_or) {
-		pr_err("OR of all string hash results = %#x != %#x",
-			string_or, -1u);
-		return -EINVAL;
-	}
-
-	if (~hash_or[0][0]) {
-		pr_err("OR of all __hash_32 results = %#x != %#x",
-			hash_or[0][0], -1u);
-		return -EINVAL;
-	}
+	KUNIT_EXPECT_EQ_MSG(test, hash_or[0][0], -1u,
+			    "OR of all __hash_32 results = %#x != %#x",
+			    hash_or[0][0], -1u);
 #ifdef HAVE_ARCH__HASH_32
 #if HAVE_ARCH__HASH_32 != 1	/* Test is pointless if results match */
-	if (~hash_or[1][0]) {
-		pr_err("OR of all __hash_32_generic results = %#x != %#x",
-			hash_or[1][0], -1u);
-		return -EINVAL;
-	}
+	KUNIT_EXPECT_EQ_MSG(test, hash_or[1][0], -1u,
+			    "OR of all __hash_32_generic results = %#x != %#x",
+			    hash_or[1][0], -1u);
 #endif
 #endif
 
@@ -207,51 +212,27 @@ test_hash_init(void)
 	for (i = 1; i <= 32; i++) {
 		u32 const m = ((u32)2 << (i-1)) - 1;	/* Low i bits set */
 
-		if (hash_or[0][i] != m) {
-			pr_err("OR of all hash_32(%d) results = %#x "
-				"(%#x expected)", i, hash_or[0][i], m);
-			return -EINVAL;
-		}
-		if (hash_or[1][i] != m) {
-			pr_err("OR of all hash_64(%d) results = %#x "
-				"(%#x expected)", i, hash_or[1][i], m);
-			return -EINVAL;
-		}
+		KUNIT_EXPECT_EQ_MSG(test, hash_or[0][i], m,
+				    "OR of all hash_32(%d) results = %#x (%#x expected)",
+				    i, hash_or[0][i], m);
+		KUNIT_EXPECT_EQ_MSG(test, hash_or[1][i], m,
+				    "OR of all hash_64(%d) results = %#x (%#x expected)",
+				    i, hash_or[1][i], m);
 	}
+}
 
-	/* Issue notices about skipped tests. */
-#ifdef HAVE_ARCH__HASH_32
-#if HAVE_ARCH__HASH_32 != 1
-	pr_info("__hash_32() is arch-specific; not compared to generic.");
-#endif
-#else
-	pr_info("__hash_32() has no arch implementation to test.");
-#endif
-#ifdef HAVE_ARCH_HASH_32
-#if HAVE_ARCH_HASH_32 != 1
-	pr_info("hash_32() is arch-specific; not compared to generic.");
-#endif
-#else
-	pr_info("hash_32() has no arch implementation to test.");
-#endif
-#ifdef HAVE_ARCH_HASH_64
-#if HAVE_ARCH_HASH_64 != 1
-	pr_info("hash_64() is arch-specific; not compared to generic.");
-#endif
-#else
-	pr_info("hash_64() has no arch implementation to test.");
-#endif
-
-	pr_notice("%u tests passed.", tests);
+static struct kunit_case hash_test_cases[] __refdata = {
+	KUNIT_CASE(test_string_or),
+	KUNIT_CASE(test_hash_or),
+	{}
+};
 
-	return 0;
-}
+static struct kunit_suite hash_test_suite = {
+	.name = "hash",
+	.test_cases = hash_test_cases,
+};
 
-static void __exit test_hash_exit(void)
-{
-}
-module_init(test_hash_init);	/* Does everything */
-module_exit(test_hash_exit);	/* Does nothing */
+kunit_test_suite(hash_test_suite);
 
 MODULE_LICENSE("GPL");
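The OR-accumulation idiom that test_string_or() and test_int_hash() keep from the old module deserves a word: if a k-bit hash mixes well, then OR-ing its outputs over many inputs must eventually set every one of the k low bits. A condensed hedged sketch of that invariant (invented test name, real hash_32()/KUnit APIs):

#include <kunit/test.h>
#include <linux/hash.h>

static void example_or_coverage(struct kunit *test)
{
	u32 accum = 0, x;
	int k = 8;
	u32 mask = ((u32)2 << (k - 1)) - 1;	/* low k bits set */

	/* Any reasonable hash saturates the mask long before 1000 inputs. */
	for (x = 1; x <= 1000; x++)
		accum |= hash_32(x, k);

	KUNIT_EXPECT_EQ_MSG(test, accum, mask,
			    "OR of hash_32(, %d) results = %#x (%#x expected)",
			    k, accum, mask);
}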
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index e2ce8f9b7605..cfe632047839 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -12,6 +12,7 @@
 #include <linux/kernel.h>
 #include <linux/cdev.h>
 #include <linux/device.h>
+#include <linux/memremap.h>
 #include <linux/mutex.h>
 #include <linux/rwsem.h>
 #include <linux/sched.h>
@@ -26,6 +27,8 @@
 #include <linux/sched/mm.h>
 #include <linux/platform_device.h>
 #include <linux/rmap.h>
+#include <linux/mmu_notifier.h>
+#include <linux/migrate.h>
 
 #include "test_hmm_uapi.h"
 
@@ -563,7 +566,6 @@ static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice)
 	}
 
 	dpage->zone_device_data = rpage;
-	get_page(dpage);
 	lock_page(dpage);
 	return dpage;
 
@@ -1086,9 +1088,33 @@ static long dmirror_fops_unlocked_ioctl(struct file *filp,
 	return 0;
 }
 
+static int dmirror_fops_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	unsigned long addr;
+
+	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
+		struct page *page;
+		int ret;
+
+		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+		if (!page)
+			return -ENOMEM;
+
+		ret = vm_insert_page(vma, addr, page);
+		if (ret) {
+			__free_page(page);
+			return ret;
+		}
+		put_page(page);
+	}
+
+	return 0;
+}
+
 static const struct file_operations dmirror_fops = {
 	.open		= dmirror_fops_open,
 	.release	= dmirror_fops_release,
+	.mmap		= dmirror_fops_mmap,
 	.unlocked_ioctl = dmirror_fops_unlocked_ioctl,
 	.llseek		= default_llseek,
 	.owner		= THIS_MODULE,
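One subtlety in the new dmirror_fops_mmap() is easy to miss, so here is an annotated sketch of the reference flow (commentary only, not part of the patch):

/*
 * Per loop iteration above:
 *
 *   page = alloc_page(...);      refcount == 1, owned by this function
 *   vm_insert_page(vma, ...);    refcount == 2, the mapping takes its own ref
 *   put_page(page);              refcount == 1, now owned by the mapping
 *
 * The page is released when the VMA is torn down; on vm_insert_page()
 * failure the local reference is dropped with __free_page() instead.
 */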
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 0643573f8686..3b413f8c8a71 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -492,6 +492,7 @@ static void kmalloc_oob_in_memset(struct kunit *test)
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
+	OPTIMIZER_HIDE_VAR(ptr);
 	OPTIMIZER_HIDE_VAR(size);
 	KUNIT_EXPECT_KASAN_FAIL(test,
 				memset(ptr, 0, size + KASAN_GRANULE_SIZE));
@@ -515,6 +516,7 @@ static void kmalloc_memmove_negative_size(struct kunit *test)
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
 	memset((char *)ptr, 0, 64);
+	OPTIMIZER_HIDE_VAR(ptr);
 	OPTIMIZER_HIDE_VAR(invalid_size);
 	KUNIT_EXPECT_KASAN_FAIL(test,
 		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
@@ -531,6 +533,7 @@ static void kmalloc_memmove_invalid_size(struct kunit *test)
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
 	memset((char *)ptr, 0, 64);
+	OPTIMIZER_HIDE_VAR(ptr);
 	KUNIT_EXPECT_KASAN_FAIL(test,
 		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
 	kfree(ptr);
@@ -700,7 +703,7 @@ static void kmem_cache_bulk(struct kunit *test)
 
 static char global_array[10];
 
-static void kasan_global_oob(struct kunit *test)
+static void kasan_global_oob_right(struct kunit *test)
 {
 	/*
 	 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
@@ -723,6 +726,20 @@ static void kasan_global_oob(struct kunit *test)
 	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
 }
 
+static void kasan_global_oob_left(struct kunit *test)
+{
+	char *volatile array = global_array;
+	char *p = array - 3;
+
+	/*
+	 * GCC is known to fail this test, skip it.
+	 * See https://bugzilla.kernel.org/show_bug.cgi?id=215051.
+	 */
+	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_CC_IS_CLANG);
+	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
+	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
+}
+
 /* Check that ksize() makes the whole object accessible. */
 static void ksize_unpoisons_memory(struct kunit *test)
 {
@@ -852,6 +869,19 @@ static void kmem_cache_invalid_free(struct kunit *test)
 	kmem_cache_destroy(cache);
 }
 
+static void empty_cache_ctor(void *object) { }
+
+static void kmem_cache_double_destroy(struct kunit *test)
+{
+	struct kmem_cache *cache;
+
+	/* Provide a constructor to prevent cache merging. */
+	cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+	kmem_cache_destroy(cache);
+	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
+}
+
 static void kasan_memchr(struct kunit *test)
 {
 	char *ptr;
@@ -869,6 +899,7 @@ static void kasan_memchr(struct kunit *test)
 	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
+	OPTIMIZER_HIDE_VAR(ptr);
 	OPTIMIZER_HIDE_VAR(size);
 	KUNIT_EXPECT_KASAN_FAIL(test,
 		kasan_ptr_result = memchr(ptr, '1', size + 1));
@@ -895,6 +926,7 @@ static void kasan_memcmp(struct kunit *test)
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 	memset(arr, 0, sizeof(arr));
 
+	OPTIMIZER_HIDE_VAR(ptr);
 	OPTIMIZER_HIDE_VAR(size);
 	KUNIT_EXPECT_KASAN_FAIL(test,
 		kasan_int_result = memcmp(ptr, arr, size+1));
@@ -1162,7 +1194,8 @@ static struct kunit_case kasan_kunit_test_cases[] = {
 	KUNIT_CASE(kmem_cache_oob),
 	KUNIT_CASE(kmem_cache_accounted),
 	KUNIT_CASE(kmem_cache_bulk),
-	KUNIT_CASE(kasan_global_oob),
+	KUNIT_CASE(kasan_global_oob_right),
+	KUNIT_CASE(kasan_global_oob_left),
 	KUNIT_CASE(kasan_stack_oob),
 	KUNIT_CASE(kasan_alloca_oob_left),
 	KUNIT_CASE(kasan_alloca_oob_right),
@@ -1170,6 +1203,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
 	KUNIT_CASE(ksize_uaf),
 	KUNIT_CASE(kmem_cache_double_free),
 	KUNIT_CASE(kmem_cache_invalid_free),
+	KUNIT_CASE(kmem_cache_double_destroy),
 	KUNIT_CASE(kasan_memchr),
 	KUNIT_CASE(kasan_memcmp),
 	KUNIT_CASE(kasan_strings),
diff --git a/lib/test_meminit.c b/lib/test_meminit.c
index e4f706a404b3..3ca717f11397 100644
--- a/lib/test_meminit.c
+++ b/lib/test_meminit.c
@@ -337,6 +337,7 @@ static int __init do_kmem_cache_size_bulk(int size, int *total_failures)
 		if (num)
 			kmem_cache_free_bulk(c, num, objects);
 	}
+	kmem_cache_destroy(c);
 	*total_failures += fail;
 	return 1;
 }
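The OPTIMIZER_HIDE_VAR() additions in the KASAN tests above all serve one purpose: they launder the pointer and size through an empty asm so the compiler cannot constant-fold the out-of-bounds expression away (or warn at build time) before KASAN can observe it at runtime. A condensed sketch of the pattern, reusing the KUNIT_EXPECT_KASAN_FAIL() macro from the file above (invented test name):

static void example_hidden_oob(struct kunit *test)
{
	size_t size = 64;
	char *ptr = kmalloc(size, GFP_KERNEL);

	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Without these, the compiler sees both the allocation size and the
	 * constant offset and may fold or drop the access entirely.
	 */
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size + 1));
	kfree(ptr);
}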
diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c
index 3750323973f4..a5a3d6c27e1f 100644
--- a/lib/test_sysctl.c
+++ b/lib/test_sysctl.c
@@ -128,26 +128,6 @@ static struct ctl_table test_table[] = {
 	{ }
 };
 
-static struct ctl_table test_sysctl_table[] = {
-	{
-		.procname	= "test_sysctl",
-		.maxlen		= 0,
-		.mode		= 0555,
-		.child		= test_table,
-	},
-	{ }
-};
-
-static struct ctl_table test_sysctl_root_table[] = {
-	{
-		.procname	= "debug",
-		.maxlen		= 0,
-		.mode		= 0555,
-		.child		= test_sysctl_table,
-	},
-	{ }
-};
-
 static struct ctl_table_header *test_sysctl_header;
 
 static int __init test_sysctl_init(void)
@@ -155,7 +135,7 @@ static int __init test_sysctl_init(void)
 	test_data.bitmap_0001 = kzalloc(SYSCTL_TEST_BITMAP_SIZE/8, GFP_KERNEL);
 	if (!test_data.bitmap_0001)
 		return -ENOMEM;
-	test_sysctl_header = register_sysctl_table(test_sysctl_root_table);
+	test_sysctl_header = register_sysctl("debug/test_sysctl", test_table);
 	if (!test_sysctl_header) {
 		kfree(test_data.bitmap_0001);
 		return -ENOMEM;
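The hunk above drops two levels of "child" table boilerplate in favor of a path string. For reference, a registration in the new style looks like this (names invented for illustration):

#include <linux/sysctl.h>

static int example_value;

static struct ctl_table example_table[] = {
	{
		.procname	= "example_value",
		.data		= &example_value,
		.maxlen		= sizeof(example_value),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table_header *example_header;

/* Creates /proc/sys/debug/example/example_value without nested tables. */
static int __init example_init(void)
{
	example_header = register_sysctl("debug/example", example_table);
	return example_header ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	unregister_sysctl_table(example_header);
}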
diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c
index 7e7bbd0f3fd2..2062be1f2e80 100644
--- a/lib/test_ubsan.c
+++ b/lib/test_ubsan.c
@@ -79,15 +79,6 @@ static void test_ubsan_load_invalid_value(void)
 	eval2 = eval;
 }
 
-static void test_ubsan_null_ptr_deref(void)
-{
-	volatile int *ptr = NULL;
-	int val;
-
-	UBSAN_TEST(CONFIG_UBSAN_OBJECT_SIZE);
-	val = *ptr;
-}
-
 static void test_ubsan_misaligned_access(void)
 {
 	volatile char arr[5] __aligned(4) = {1, 2, 3, 4, 5};
@@ -98,29 +89,16 @@ static void test_ubsan_misaligned_access(void)
 	*ptr = val;
 }
 
-static void test_ubsan_object_size_mismatch(void)
-{
-	/* "((aligned(8)))" helps this not into be misaligned for ptr-access. */
-	volatile int val __aligned(8) = 4;
-	volatile long long *ptr, val2;
-
-	UBSAN_TEST(CONFIG_UBSAN_OBJECT_SIZE);
-	ptr = (long long *)&val;
-	val2 = *ptr;
-}
-
 static const test_ubsan_fp test_ubsan_array[] = {
 	test_ubsan_shift_out_of_bounds,
 	test_ubsan_out_of_bounds,
 	test_ubsan_load_invalid_value,
 	test_ubsan_misaligned_access,
-	test_ubsan_object_size_mismatch,
 };
 
 /* Excluded because they Oops the module. */
 static const test_ubsan_fp skip_ubsan_array[] = {
 	test_ubsan_divrem_overflow,
-	test_ubsan_null_ptr_deref,
 };
 
 static int __init test_ubsan_init(void)
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 51495c1aceb8..53fe73a48adf 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -762,14 +762,16 @@ static void enable_ptr_key_workfn(struct work_struct *work)
 
 static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
 
-static void fill_random_ptr_key(struct random_ready_callback *unused)
+static int fill_random_ptr_key(struct notifier_block *nb,
+			       unsigned long action, void *data)
 {
 	/* This may be in an interrupt handler. */
 	queue_work(system_unbound_wq, &enable_ptr_key_work);
+	return 0;
 }
 
-static struct random_ready_callback random_ready = {
-	.func = fill_random_ptr_key
+static struct notifier_block random_ready = {
+	.notifier_call = fill_random_ptr_key
 };
 
 static int __init initialize_ptr_random(void)
@@ -783,7 +785,7 @@ static int __init initialize_ptr_random(void)
 		return 0;
 	}
 
-	ret = add_random_ready_callback(&random_ready);
+	ret = register_random_ready_notifier(&random_ready);
 	if (!ret) {
 		return 0;
 	} else if (ret == -EALREADY) {
@@ -1259,20 +1261,13 @@ char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap,
 				struct printf_spec spec, const char *fmt)
 {
 	int nr_bits = max_t(int, spec.field_width, 0);
-	/* current bit is 'cur', most recently seen range is [rbot, rtop] */
-	int cur, rbot, rtop;
 	bool first = true;
+	int rbot, rtop;
 
 	if (check_pointer(&buf, end, bitmap, spec))
 		return buf;
 
-	rbot = cur = find_first_bit(bitmap, nr_bits);
-	while (cur < nr_bits) {
-		rtop = cur;
-		cur = find_next_bit(bitmap, nr_bits, cur + 1);
-		if (cur < nr_bits && cur <= rtop + 1)
-			continue;
-
+	for_each_set_bitrange(rbot, rtop, bitmap, nr_bits) {
 		if (!first) {
 			if (buf < end)
 				*buf = ',';
@@ -1281,15 +1276,12 @@ char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap,
 		first = false;
 
 		buf = number(buf, end, rbot, default_dec_spec);
-		if (rbot < rtop) {
-			if (buf < end)
-				*buf = '-';
-			buf++;
-
-			buf = number(buf, end, rtop, default_dec_spec);
-		}
+		if (rtop == rbot + 1)
+			continue;
 
-		rbot = cur;
+		if (buf < end)
+			*buf = '-';
+		buf = number(++buf, end, rtop - 1, default_dec_spec);
 	}
 	return buf;
 }
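for_each_set_bitrange() yields half-open [rbot, rtop) ranges, which is why the rewritten bitmap_list_string() prints rtop - 1 as the range end and treats rtop == rbot + 1 as a single bit. A small illustrative walk-through (invented function name):

#include <linux/bitops.h>
#include <linux/printk.h>

/* Bits {0,1,2,5} of an 8-bit map (0x27) produce two iterations:
 *   rbot=0, rtop=3  ->  rendered as "0-2" (range end is rtop - 1)
 *   rbot=5, rtop=6  ->  rtop == rbot + 1, a single bit, rendered as "5"
 * so %pbl would print "0-2,5".
 */
static void example_bitrange_walk(void)
{
	DECLARE_BITMAP(map, 8) = { 0x27 };
	int rbot, rtop;

	for_each_set_bitrange(rbot, rtop, map, 8)
		pr_info("range [%d, %d)\n", rbot, rtop);
}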
diff --git a/lib/xarray.c b/lib/xarray.c
index f5d8f54907b4..b95e92598b9c 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -157,7 +157,7 @@ static void xas_move_index(struct xa_state *xas, unsigned long offset)
 	xas->xa_index += offset << shift;
 }
 
-static void xas_advance(struct xa_state *xas)
+static void xas_next_offset(struct xa_state *xas)
 {
 	xas->xa_offset++;
 	xas_move_index(xas, xas->xa_offset);
@@ -302,7 +302,7 @@ bool xas_nomem(struct xa_state *xas, gfp_t gfp)
 	}
 	if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
 		gfp |= __GFP_ACCOUNT;
-	xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+	xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
 	if (!xas->xa_alloc)
 		return false;
 	xas->xa_alloc->parent = NULL;
@@ -334,10 +334,10 @@ static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
 		gfp |= __GFP_ACCOUNT;
 	if (gfpflags_allow_blocking(gfp)) {
 		xas_unlock_type(xas, lock_type);
-		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+		xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
 		xas_lock_type(xas, lock_type);
 	} else {
-		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+		xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
 	}
 	if (!xas->xa_alloc)
 		return false;
@@ -371,7 +371,7 @@ static void *xas_alloc(struct xa_state *xas, unsigned int shift)
 	if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
 		gfp |= __GFP_ACCOUNT;
 
-	node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+	node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
 	if (!node) {
 		xas_set_err(xas, -ENOMEM);
 		return NULL;
@@ -1014,7 +1014,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
 		void *sibling = NULL;
 		struct xa_node *node;
 
-		node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+		node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
 		if (!node)
 			goto nomem;
 		node->array = xas->xa;
@@ -1250,7 +1250,7 @@ void *xas_find(struct xa_state *xas, unsigned long max)
 		xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
 	}
 
-	xas_advance(xas);
+	xas_next_offset(xas);
 
 	while (xas->xa_node && (xas->xa_index <= max)) {
 		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
@@ -1268,7 +1268,7 @@ void *xas_find(struct xa_state *xas, unsigned long max)
 		if (entry && !xa_is_sibling(entry))
 			return entry;
 
-		xas_advance(xas);
+		xas_next_offset(xas);
 	}
 
 	if (!xas->xa_node)
