| field | value | date |
|---|---|---|
| author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2022-07-08 13:39:28 -0700 |
| committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2022-07-08 13:39:28 -0700 |
| commit | a63f7778f76e1cf8ed3bcb7a1d9453c9609121ad (patch) | |
| tree | 9a26f3ff67dfbd542c6eca3e5c1c7f443b1647dd /lib | |
| parent | c4bcc1b99b8b8acdfe673e4701a9c2acb6b8b2fb (diff) | |
| parent | 88084a3df1672e131ddc1b4e39eeacfd39864acf (diff) | |
Merge tag 'v5.19-rc5' into next
Merge with mainline to bring in the latest definitions from the MFD subsystem
needed for the Mediatek keypad driver.
Diffstat (limited to 'lib')
55 files changed, 2053 insertions, 1316 deletions
diff --git a/lib/.gitignore b/lib/.gitignore index e5e217b8307b..54596b634ecb 100644 --- a/lib/.gitignore +++ b/lib/.gitignore @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only /crc32table.h /crc64table.h +/default.bconf /gen_crc32table /gen_crc64table /oid_registry_data.c diff --git a/lib/Kconfig b/lib/Kconfig index 087e06b4cdfd..eaaad4d85bf2 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -120,6 +120,9 @@ config INDIRECT_IOMEM_FALLBACK source "lib/crypto/Kconfig" +config LIB_MEMNEQ + bool + config CRC_CCITT tristate "CRC-CCITT functions" help @@ -737,3 +740,6 @@ config PLDMFW config ASN1_ENCODER tristate + +config POLYNOMIAL + tristate diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 075cd25363ac..2e24db4bff19 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -485,24 +485,25 @@ config FRAME_POINTER larger and slower, but it gives very useful debugging information in case of kernel bugs. (precise oopses/stacktraces/warnings) +config OBJTOOL + bool + config STACK_VALIDATION bool "Compile-time stack metadata validation" - depends on HAVE_STACK_VALIDATION + depends on HAVE_STACK_VALIDATION && UNWINDER_FRAME_POINTER + select OBJTOOL default n help - Add compile-time checks to validate stack metadata, including frame - pointers (if CONFIG_FRAME_POINTER is enabled). This helps ensure - that runtime stack traces are more reliable. - - This is also a prerequisite for generation of ORC unwind data, which - is needed for CONFIG_UNWINDER_ORC. + Validate frame pointer rules at compile-time. This helps ensure that + runtime stack traces are more reliable. For more information, see tools/objtool/Documentation/stack-validation.txt. -config VMLINUX_VALIDATION +config NOINSTR_VALIDATION bool - depends on STACK_VALIDATION && DEBUG_ENTRY + depends on HAVE_NOINSTR_VALIDATION && DEBUG_ENTRY + select OBJTOOL default y config VMLINUX_MAP @@ -698,40 +699,6 @@ config DEBUG_OBJECTS_ENABLE_DEFAULT help Debug objects boot parameter default value -config DEBUG_SLAB - bool "Debug slab memory allocations" - depends on DEBUG_KERNEL && SLAB - help - Say Y here to have the kernel do limited verification on memory - allocation as well as poisoning memory on free to catch use of freed - memory. This can make kmalloc/kfree-intensive workloads much slower. - -config SLUB_DEBUG_ON - bool "SLUB debugging on by default" - depends on SLUB && SLUB_DEBUG - default n - help - Boot with debugging on by default. SLUB boots by default with - the runtime debug capabilities switched off. Enabling this is - equivalent to specifying the "slub_debug" parameter on boot. - There is no support for more fine grained debug control like - possible with slub_debug=xxx. SLUB debugging may be switched - off in a kernel built with CONFIG_SLUB_DEBUG_ON by specifying - "slub_debug=-". - -config SLUB_STATS - default n - bool "Enable SLUB performance statistics" - depends on SLUB && SYSFS - help - SLUB statistics are useful to debug SLUBs allocation behavior in - order find ways to optimize the allocator. This should never be - enabled for production use since keeping statistics slows down - the allocator by a few percentage points. The slabinfo command - supports the determination of the most active slabs to figure - out which slabs are relevant to a particular load. - Try running: slabinfo -DA - config HAVE_DEBUG_KMEMLEAK bool @@ -1071,13 +1038,6 @@ config BOOTPARAM_SOFTLOCKUP_PANIC Say N if unsure. 
-config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE - int - depends on SOFTLOCKUP_DETECTOR - range 0 1 - default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC - default 1 if BOOTPARAM_SOFTLOCKUP_PANIC - config HARDLOCKUP_DETECTOR_PERF bool select SOFTLOCKUP_DETECTOR @@ -1119,13 +1079,6 @@ config BOOTPARAM_HARDLOCKUP_PANIC Say N if unsure. -config BOOTPARAM_HARDLOCKUP_PANIC_VALUE - int - depends on HARDLOCKUP_DETECTOR - range 0 1 - default 0 if !BOOTPARAM_HARDLOCKUP_PANIC - default 1 if BOOTPARAM_HARDLOCKUP_PANIC - config DETECT_HUNG_TASK bool "Detect Hung Tasks" depends on DEBUG_KERNEL @@ -1173,13 +1126,6 @@ config BOOTPARAM_HUNG_TASK_PANIC Say N if unsure. -config BOOTPARAM_HUNG_TASK_PANIC_VALUE - int - depends on DETECT_HUNG_TASK - range 0 1 - default 0 if !BOOTPARAM_HUNG_TASK_PANIC - default 1 if BOOTPARAM_HUNG_TASK_PANIC - config WQ_WATCHDOG bool "Detect Workqueue Stalls" depends on DEBUG_KERNEL @@ -1544,29 +1490,6 @@ config CSD_LOCK_WAIT_DEBUG include the IPI handler function currently executing (if any) and relevant stack traces. -choice - prompt "Lock debugging: prove subsystem device_lock() correctness" - depends on PROVE_LOCKING - help - For subsystems that have instrumented their usage of the device_lock() - with nested annotations, enable lock dependency checking. The locking - hierarchy 'subclass' identifiers are not compatible across - sub-systems, so only one can be enabled at a time. - -config PROVE_NVDIMM_LOCKING - bool "NVDIMM" - depends on LIBNVDIMM - help - Enable lockdep to validate nd_device_lock() usage. - -config PROVE_CXL_LOCKING - bool "CXL" - depends on CXL_BUS - help - Enable lockdep to validate cxl_device_lock() usage. - -endchoice - endmenu # lock debugging config TRACE_IRQFLAGS @@ -1616,8 +1539,7 @@ config WARN_ALL_UNSEEDED_RANDOM so architecture maintainers really need to do what they can to get the CRNG seeded sooner after the system is booted. However, since users cannot do anything actionable to - address this, by default the kernel will issue only a single - warning for the first use of unseeded randomness. + address this, by default this option is disabled. Say Y here if you want to receive warnings for all uses of unseeded randomness. This will be of use primarily for @@ -2035,10 +1957,11 @@ config KCOV bool "Code coverage for fuzzing" depends on ARCH_HAS_KCOV depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS - depends on !ARCH_WANTS_NO_INSTR || STACK_VALIDATION || \ + depends on !ARCH_WANTS_NO_INSTR || HAVE_NOINSTR_HACK || \ GCC_VERSION >= 120000 || CLANG_VERSION >= 130000 select DEBUG_FS select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC + select OBJTOOL if HAVE_NOINSTR_HACK help KCOV exposes kernel code coverage information in a form suitable for coverage-guided fuzzing (randomized testing). @@ -2140,10 +2063,11 @@ config TEST_DIV64 If unsure, say N. config KPROBES_SANITY_TEST - tristate "Kprobes sanity tests" + tristate "Kprobes sanity tests" if !KUNIT_ALL_TESTS depends on DEBUG_KERNEL depends on KPROBES depends on KUNIT + default KUNIT_ALL_TESTS help This option provides for testing basic kprobes functionality on boot. Samples of kprobe and kretprobe are inserted and @@ -2417,8 +2341,9 @@ config TEST_SYSCTL If unsure, say N. config BITFIELD_KUNIT - tristate "KUnit test bitfield functions at runtime" + tristate "KUnit test bitfield functions at runtime" if !KUNIT_ALL_TESTS depends on KUNIT + default KUNIT_ALL_TESTS help Enable this option to test the bitfield functions at boot. @@ -2452,8 +2377,9 @@ config HASH_KUNIT_TEST optimized versions. If unsure, say N. 
config RESOURCE_KUNIT_TEST - tristate "KUnit test for resource API" + tristate "KUnit test for resource API" if !KUNIT_ALL_TESTS depends on KUNIT + default KUNIT_ALL_TESTS help This builds the resource API unit test. Tests the logic of API provided by resource.c and ioport.h. @@ -2506,8 +2432,9 @@ config LINEAR_RANGES_TEST If unsure, say N. config CMDLINE_KUNIT_TEST - tristate "KUnit test for cmdline API" + tristate "KUnit test for cmdline API" if !KUNIT_ALL_TESTS depends on KUNIT + default KUNIT_ALL_TESTS help This builds the cmdline API unit test. Tests the logic of API provided by cmdline.c. @@ -2517,8 +2444,9 @@ config CMDLINE_KUNIT_TEST If unsure, say N. config BITS_TEST - tristate "KUnit test for bits.h" + tristate "KUnit test for bits.h" if !KUNIT_ALL_TESTS depends on KUNIT + default KUNIT_ALL_TESTS help This builds the bits unit test. Tests the logic of macros defined in bits.h. diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index 1f3e620188a2..f0973da583e0 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only + # This config refers to the generic KASAN mode. config HAVE_ARCH_KASAN bool @@ -15,9 +16,8 @@ config HAVE_ARCH_KASAN_VMALLOC config ARCH_DISABLE_KASAN_INLINE bool help - An architecture might not support inline instrumentation. - When this option is selected, inline and stack instrumentation are - disabled. + Disables both inline and stack instrumentation. Selected by + architectures that do not support these instrumentation types. config CC_HAS_KASAN_GENERIC def_bool $(cc-option, -fsanitize=kernel-address) @@ -26,13 +26,13 @@ config CC_HAS_KASAN_SW_TAGS def_bool $(cc-option, -fsanitize=kernel-hwaddress) # This option is only required for software KASAN modes. -# Old GCC versions don't have proper support for no_sanitize_address. +# Old GCC versions do not have proper support for no_sanitize_address. # See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89124 for details. config CC_HAS_WORKING_NOSANITIZE_ADDRESS def_bool !CC_IS_GCC || GCC_VERSION >= 80300 menuconfig KASAN - bool "KASAN: runtime memory debugger" + bool "KASAN: dynamic memory safety error detector" depends on (((HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \ (HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)) && \ CC_HAS_WORKING_NOSANITIZE_ADDRESS) || \ @@ -40,10 +40,13 @@ menuconfig KASAN depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB) select STACKDEPOT_ALWAYS_INIT help - Enables KASAN (KernelAddressSANitizer) - runtime memory debugger, - designed to find out-of-bounds accesses and use-after-free bugs. + Enables KASAN (Kernel Address Sanitizer) - a dynamic memory safety + error detector designed to find out-of-bounds and use-after-free bugs. + See Documentation/dev-tools/kasan.rst for details. + For better error reports, also enable CONFIG_STACKTRACE. + if KASAN choice @@ -51,75 +54,71 @@ choice default KASAN_GENERIC help KASAN has three modes: - 1. generic KASAN (similar to userspace ASan, - x86_64/arm64/xtensa, enabled with CONFIG_KASAN_GENERIC), - 2. software tag-based KASAN (arm64 only, based on software - memory tagging (similar to userspace HWASan), enabled with - CONFIG_KASAN_SW_TAGS), and - 3. hardware tag-based KASAN (arm64 only, based on hardware - memory tagging, enabled with CONFIG_KASAN_HW_TAGS). - All KASAN modes are strictly debugging features. + 1. Generic KASAN (supported by many architectures, enabled with + CONFIG_KASAN_GENERIC, similar to userspace ASan), + 2. 
Software Tag-Based KASAN (arm64 only, based on software memory + tagging, enabled with CONFIG_KASAN_SW_TAGS, similar to userspace + HWASan), and + 3. Hardware Tag-Based KASAN (arm64 only, based on hardware memory + tagging, enabled with CONFIG_KASAN_HW_TAGS). - For better error reports enable CONFIG_STACKTRACE. + See Documentation/dev-tools/kasan.rst for details about each mode. config KASAN_GENERIC - bool "Generic mode" + bool "Generic KASAN" depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS select SLUB_DEBUG if SLUB select CONSTRUCTORS help - Enables generic KASAN mode. + Enables Generic KASAN. - This mode is supported in both GCC and Clang. With GCC it requires - version 8.3.0 or later. Any supported Clang version is compatible, - but detection of out-of-bounds accesses for global variables is - supported only since Clang 11. + Requires GCC 8.3.0+ or Clang. - This mode consumes about 1/8th of available memory at kernel start - and introduces an overhead of ~x1.5 for the rest of the allocations. + Consumes about 1/8th of available memory at kernel start and adds an + overhead of ~50% for dynamic allocations. The performance slowdown is ~x3. - Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB - (the resulting kernel does not boot). + (Incompatible with CONFIG_DEBUG_SLAB: the kernel does not boot.) config KASAN_SW_TAGS - bool "Software tag-based mode" + bool "Software Tag-Based KASAN" depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS select SLUB_DEBUG if SLUB select CONSTRUCTORS help - Enables software tag-based KASAN mode. + Enables Software Tag-Based KASAN. - This mode require software memory tagging support in the form of - HWASan-like compiler instrumentation. + Requires GCC 11+ or Clang. - Currently this mode is only implemented for arm64 CPUs and relies on - Top Byte Ignore. This mode requires Clang. + Supported only on arm64 CPUs and relies on Top Byte Ignore. - This mode consumes about 1/16th of available memory at kernel start - and introduces an overhead of ~20% for the rest of the allocations. - This mode may potentially introduce problems relating to pointer - casting and comparison, as it embeds tags into the top byte of each - pointer. + Consumes about 1/16th of available memory at kernel start and + add an overhead of ~20% for dynamic allocations. - Currently CONFIG_KASAN_SW_TAGS doesn't work with CONFIG_DEBUG_SLAB - (the resulting kernel does not boot). + May potentially introduce problems related to pointer casting and + comparison, as it embeds a tag into the top byte of each pointer. + + (Incompatible with CONFIG_DEBUG_SLAB: the kernel does not boot.) config KASAN_HW_TAGS - bool "Hardware tag-based mode" + bool "Hardware Tag-Based KASAN" depends on HAVE_ARCH_KASAN_HW_TAGS depends on SLUB help - Enables hardware tag-based KASAN mode. + Enables Hardware Tag-Based KASAN. + + Requires GCC 10+ or Clang 12+. - This mode requires hardware memory tagging support, and can be used - by any architecture that provides it. + Supported only on arm64 CPUs starting from ARMv8.5 and relies on + Memory Tagging Extension and Top Byte Ignore. - Currently this mode is only implemented for arm64 CPUs starting from - ARMv8.5 and relies on Memory Tagging Extension and Top Byte Ignore. + Consumes about 1/32nd of available memory. + + May potentially introduce problems related to pointer casting and + comparison, as it embeds a tag into the top byte of each pointer. 
endchoice @@ -131,83 +130,80 @@ choice config KASAN_OUTLINE bool "Outline instrumentation" help - Before every memory access compiler insert function call - __asan_load*/__asan_store*. These functions performs check - of shadow memory. This is slower than inline instrumentation, - however it doesn't bloat size of kernel's .text section so - much as inline does. + Makes the compiler insert function calls that check whether the memory + is accessible before each memory access. Slower than KASAN_INLINE, but + does not bloat the size of the kernel's .text section so much. config KASAN_INLINE bool "Inline instrumentation" depends on !ARCH_DISABLE_KASAN_INLINE help - Compiler directly inserts code checking shadow memory before - memory accesses. This is faster than outline (in some workloads - it gives about x2 boost over outline instrumentation), but - make kernel's .text size much bigger. + Makes the compiler directly insert memory accessibility checks before + each memory access. Faster than KASAN_OUTLINE (gives ~x2 boost for + some workloads), but makes the kernel's .text size much bigger. endchoice config KASAN_STACK - bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST + bool "Stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST depends on KASAN_GENERIC || KASAN_SW_TAGS depends on !ARCH_DISABLE_KASAN_INLINE default y if CC_IS_GCC help - The LLVM stack address sanitizer has a know problem that - causes excessive stack usage in a lot of functions, see - https://bugs.llvm.org/show_bug.cgi?id=38809 - Disabling asan-stack makes it safe to run kernels build - with clang-8 with KASAN enabled, though it loses some of - the functionality. - This feature is always disabled when compile-testing with clang - to avoid cluttering the output in stack overflow warnings, - but clang users can still enable it for builds without - CONFIG_COMPILE_TEST. On gcc it is assumed to always be safe - to use and enabled by default. - If the architecture disables inline instrumentation, stack - instrumentation is also disabled as it adds inline-style - instrumentation that is run unconditionally. + Disables stack instrumentation and thus KASAN's ability to detect + out-of-bounds bugs in stack variables. + + With Clang, stack instrumentation has a problem that causes excessive + stack usage, see https://bugs.llvm.org/show_bug.cgi?id=38809. Thus, + with Clang, this option is deemed unsafe. + + This option is always disabled when compile-testing with Clang to + avoid cluttering the log with stack overflow warnings. + + With GCC, enabling stack instrumentation is assumed to be safe. + + If the architecture disables inline instrumentation via + ARCH_DISABLE_KASAN_INLINE, stack instrumentation gets disabled + as well, as it adds inline-style instrumentation that is run + unconditionally. config KASAN_TAGS_IDENTIFY - bool "Enable memory corruption identification" + bool "Memory corruption type identification" depends on KASAN_SW_TAGS || KASAN_HW_TAGS help - This option enables best-effort identification of bug type - (use-after-free or out-of-bounds) at the cost of increased - memory consumption. + Enables best-effort identification of the bug types (use-after-free + or out-of-bounds) at the cost of increased memory consumption. + Only applicable for the tag-based KASAN modes. config KASAN_VMALLOC bool "Check accesses to vmalloc allocations" depends on HAVE_ARCH_KASAN_VMALLOC help - This mode makes KASAN check accesses to vmalloc allocations for - validity. 
+ Makes KASAN check the validity of accesses to vmalloc allocations. - With software KASAN modes, checking is done for all types of vmalloc - allocations. Enabling this option leads to higher memory usage. + With software KASAN modes, all types vmalloc allocations are + checked. Enabling this option leads to higher memory usage. - With hardware tag-based KASAN, only VM_ALLOC mappings are checked. - There is no additional memory usage. + With Hardware Tag-Based KASAN, only non-executable VM_ALLOC mappings + are checked. There is no additional memory usage. config KASAN_KUNIT_TEST tristate "KUnit-compatible tests of KASAN bug detection capabilities" if !KUNIT_ALL_TESTS depends on KASAN && KUNIT default KUNIT_ALL_TESTS help - This is a KUnit test suite doing various nasty things like - out of bounds and use after free accesses. It is useful for testing - kernel debugging features like KASAN. + A KUnit-based KASAN test suite. Triggers different kinds of + out-of-bounds and use-after-free accesses. Useful for testing whether + KASAN can detect certain bug types. For more information on KUnit and unit tests in general, please refer - to the KUnit documentation in Documentation/dev-tools/kunit. + to the KUnit documentation in Documentation/dev-tools/kunit/. config KASAN_MODULE_TEST tristate "KUnit-incompatible tests of KASAN bug detection capabilities" depends on m && KASAN && !KASAN_HW_TAGS help - This is a part of the KASAN test suite that is incompatible with - KUnit. Currently includes tests that do bad copy_from/to_user - accesses. + A part of the KASAN test suite that is not integrated with KUnit. + Incompatible with Hardware Tag-Based KASAN. endif # KASAN diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan index de022445fbba..47a693c45864 100644 --- a/lib/Kconfig.kcsan +++ b/lib/Kconfig.kcsan @@ -187,7 +187,9 @@ config KCSAN_WEAK_MEMORY # We can either let objtool nop __tsan_func_{entry,exit}() and builtin # atomics instrumentation in .noinstr.text, or use a compiler that can # implement __no_kcsan to really remove all instrumentation. - depends on STACK_VALIDATION || CC_IS_GCC || CLANG_VERSION >= 140000 + depends on !ARCH_WANTS_NO_INSTR || HAVE_NOINSTR_HACK || \ + CC_IS_GCC || CLANG_VERSION >= 140000 + select OBJTOOL if HAVE_NOINSTR_HACK help Enable support for modeling a subset of weak memory, which allows detecting a subset of data races due to missing memory barriers. diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan index f3c57ed51838..a9f7eb047768 100644 --- a/lib/Kconfig.ubsan +++ b/lib/Kconfig.ubsan @@ -94,7 +94,7 @@ config UBSAN_UNREACHABLE bool "Perform checking for unreachable code" # objtool already handles unreachable checking and gets angry about # seeing UBSan instrumentation located in unreachable places. 
- depends on !STACK_VALIDATION + depends on !(OBJTOOL && (STACK_VALIDATION || UNWINDER_ORC || HAVE_UACCESS_VALIDATION)) depends on $(cc-option,-fsanitize=unreachable) help This option enables -fsanitize=unreachable which checks for control diff --git a/lib/Makefile b/lib/Makefile index 6b9ffc1bd1ee..f99bf61f8bbc 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -251,6 +251,7 @@ obj-$(CONFIG_DIMLIB) += dim/ obj-$(CONFIG_SIGNATURE) += digsig.o lib-$(CONFIG_CLZ_TAB) += clz_tab.o +lib-$(CONFIG_LIB_MEMNEQ) += memneq.o obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o @@ -263,6 +264,8 @@ obj-$(CONFIG_MEMREGION) += memregion.o obj-$(CONFIG_STMP_DEVICE) += stmp_device.o obj-$(CONFIG_IRQ_POLL) += irq_poll.o +obj-$(CONFIG_POLYNOMIAL) += polynomial.o + # stackdepot.c should not be instrumented or call instrumented functions. # Prevent the compiler from calling builtins like memcmp() or bcmp() from this # file. @@ -279,7 +282,15 @@ $(foreach file, $(libfdt_files), \ $(eval CFLAGS_$(file) = -I $(srctree)/scripts/dtc/libfdt)) lib-$(CONFIG_LIBFDT) += $(libfdt_files) -lib-$(CONFIG_BOOT_CONFIG) += bootconfig.o +obj-$(CONFIG_BOOT_CONFIG) += bootconfig.o +obj-$(CONFIG_BOOT_CONFIG_EMBED) += bootconfig-data.o + +$(obj)/bootconfig-data.o: $(obj)/default.bconf + +targets += default.bconf +filechk_defbconf = cat $(or $(real-prereqs), /dev/null) +$(obj)/default.bconf: $(CONFIG_BOOT_CONFIG_EMBED_FILE) FORCE + $(call filechk,defbconf) obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o diff --git a/lib/assoc_array.c b/lib/assoc_array.c index 079c72e26493..ca0b4f360c1a 100644 --- a/lib/assoc_array.c +++ b/lib/assoc_array.c @@ -1461,6 +1461,7 @@ int assoc_array_gc(struct assoc_array *array, struct assoc_array_ptr *cursor, *ptr; struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp; unsigned long nr_leaves_on_tree; + bool retained; int keylen, slot, nr_free, next_slot, i; pr_devel("-->%s()\n", __func__); @@ -1536,6 +1537,7 @@ continue_node: goto descend; } +retry_compress: pr_devel("-- compress node %p --\n", new_n); /* Count up the number of empty slots in this node and work out the @@ -1553,6 +1555,7 @@ continue_node: pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch); /* See what we can fold in */ + retained = false; next_slot = 0; for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) { struct assoc_array_shortcut *s; @@ -1602,9 +1605,14 @@ continue_node: pr_devel("[%d] retain node %lu/%d [nx %d]\n", slot, child->nr_leaves_on_branch, nr_free + 1, next_slot); + retained = true; } } + if (retained && new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) { + pr_devel("internal nodes remain despite enough space, retrying\n"); + goto retry_compress; + } pr_devel("after: %lu\n", new_n->nr_leaves_on_branch); nr_leaves_on_tree = new_n->nr_leaves_on_branch; diff --git a/lib/bitmap.c b/lib/bitmap.c index 0d5c2ece0bcb..b18e31ea6e66 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -45,19 +45,19 @@ * for the best explanations of this ordering. 
*/ -int __bitmap_equal(const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int bits) +bool __bitmap_equal(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int bits) { unsigned int k, lim = bits/BITS_PER_LONG; for (k = 0; k < lim; ++k) if (bitmap1[k] != bitmap2[k]) - return 0; + return false; if (bits % BITS_PER_LONG) if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) - return 0; + return false; - return 1; + return true; } EXPORT_SYMBOL(__bitmap_equal); @@ -303,33 +303,33 @@ void __bitmap_replace(unsigned long *dst, } EXPORT_SYMBOL(__bitmap_replace); -int __bitmap_intersects(const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int bits) +bool __bitmap_intersects(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int bits) { unsigned int k, lim = bits/BITS_PER_LONG; for (k = 0; k < lim; ++k) if (bitmap1[k] & bitmap2[k]) - return 1; + return true; if (bits % BITS_PER_LONG) if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) - return 1; - return 0; + return true; + return false; } EXPORT_SYMBOL(__bitmap_intersects); -int __bitmap_subset(const unsigned long *bitmap1, - const unsigned long *bitmap2, unsigned int bits) +bool __bitmap_subset(const unsigned long *bitmap1, + const unsigned long *bitmap2, unsigned int bits) { unsigned int k, lim = bits/BITS_PER_LONG; for (k = 0; k < lim; ++k) if (bitmap1[k] & ~bitmap2[k]) - return 0; + return false; if (bits % BITS_PER_LONG) if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) - return 0; - return 1; + return false; + return true; } EXPORT_SYMBOL(__bitmap_subset); @@ -527,33 +527,39 @@ static int bitmap_print_to_buf(bool list, char *buf, const unsigned long *maskp, * cpumap_print_to_pagebuf() or directly by drivers to export hexadecimal * bitmask and decimal list to userspace by sysfs ABI. * Drivers might be using a normal attribute for this kind of ABIs. A - * normal attribute typically has show entry as below: - * static ssize_t example_attribute_show(struct device *dev, + * normal attribute typically has show entry as below:: + * + * static ssize_t example_attribute_show(struct device *dev, * struct device_attribute *attr, char *buf) - * { + * { * ... * return bitmap_print_to_pagebuf(true, buf, &mask, nr_trig_max); - * } + * } + * * show entry of attribute has no offset and count parameters and this * means the file is limited to one page only. * bitmap_print_to_pagebuf() API works terribly well for this kind of - * normal attribute with buf parameter and without offset, count: - * bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, + * normal attribute with buf parameter and without offset, count:: + * + * bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, * int nmaskbits) - * { - * } + * { + * } + * * The problem is once we have a large bitmap, we have a chance to get a * bitmask or list more than one page. Especially for list, it could be * as complex as 0,3,5,7,9,... We have no simple way to know it exact size. * It turns out bin_attribute is a way to break this limit. bin_attribute - * has show entry as below: - * static ssize_t - * example_bin_attribute_show(struct file *filp, struct kobject *kobj, + * has show entry as below:: + * + * static ssize_t + * example_bin_attribute_show(struct file *filp, struct kobject *kobj, * struct bin_attribute *attr, char *buf, * loff_t offset, size_t count) - * { + * { * ... 
- * } + * } + * * With the new offset and count parameters, this makes sysfs ABI be able * to support file size more than one page. For example, offset could be * >= 4096. @@ -577,6 +583,7 @@ static int bitmap_print_to_buf(bool list, char *buf, const unsigned long *maskp, * This function is not a replacement for sprintf() or bitmap_print_to_pagebuf(). * It is intended to workaround sysfs limitations discussed above and should be * used carefully in general case for the following reasons: + * * - Time complexity is O(nbits^2/count), comparing to O(nbits) for snprintf(). * - Memory complexity is O(nbits), comparing to O(1) for snprintf(). * - @off and @count are NOT offset and number of bits to print. @@ -1505,5 +1512,59 @@ void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits) buf[halfwords - 1] &= (u32) (UINT_MAX >> ((-nbits) & 31)); } EXPORT_SYMBOL(bitmap_to_arr32); +#endif + +#if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN) +/** + * bitmap_from_arr64 - copy the contents of u64 array of bits to bitmap + * @bitmap: array of unsigned longs, the destination bitmap + * @buf: array of u64 (in host byte order), the source bitmap + * @nbits: number of bits in @bitmap + */ +void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits) +{ + int n; + + for (n = nbits; n > 0; n -= 64) { + u64 val = *buf++; + *bitmap++ = val; + if (n > 32) + *bitmap++ = val >> 32; + } + + /* + * Clear tail bits in the last word beyond nbits. + * + * Negative index is OK because here we point to the word next + * to the last word of the bitmap, except for nbits == 0, which + * is tested implicitly. + */ + if (nbits % BITS_PER_LONG) + bitmap[-1] &= BITMAP_LAST_WORD_MASK(nbits); +} +EXPORT_SYMBOL(bitmap_from_arr64); + +/** + * bitmap_to_arr64 - copy the contents of bitmap to a u64 array of bits + * @buf: array of u64 (in host byte order), the dest bitmap + * @bitmap: array of unsigned longs, the source bitmap + * @nbits: number of bits in @bitmap + */ +void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits) +{ + const unsigned long *end = bitmap + BITS_TO_LONGS(nbits); + + while (bitmap < end) { + *buf = *bitmap++; + if (bitmap < end) + *buf |= (u64)(*bitmap++) << 32; + buf++; + } + + /* Clear tail bits in the last element of array beyond nbits. */ + if (nbits % 64) + buf[-1] &= GENMASK_ULL(nbits % 64, 0); +} +EXPORT_SYMBOL(bitmap_to_arr64); #endif diff --git a/lib/bootconfig-data.S b/lib/bootconfig-data.S new file mode 100644 index 000000000000..ef85ba1a82f4 --- /dev/null +++ b/lib/bootconfig-data.S @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Embed default bootconfig in the kernel. + */ + .section .init.rodata, "aw" + .global embedded_bootconfig_data +embedded_bootconfig_data: + .incbin "lib/default.bconf" + .global embedded_bootconfig_data_end +embedded_bootconfig_data_end: diff --git a/lib/bootconfig.c b/lib/bootconfig.c index 74f3201ab8e5..c59d26068a64 100644 --- a/lib/bootconfig.c +++ b/lib/bootconfig.c @@ -12,6 +12,19 @@ #include <linux/kernel.h> #include <linux/memblock.h> #include <linux/string.h> + +#ifdef CONFIG_BOOT_CONFIG_EMBED +/* embedded_bootconfig_data is defined in bootconfig-data.S */ +extern __visible const char embedded_bootconfig_data[]; +extern __visible const char embedded_bootconfig_data_end[]; + +const char * __init xbc_get_embedded_bootconfig(size_t *size) +{ + *size = embedded_bootconfig_data_end - embedded_bootconfig_data; + return (*size) ? 
embedded_bootconfig_data : NULL; +} +#endif + #else /* !__KERNEL__ */ /* * NOTE: This is only for tools/bootconfig, because tools/bootconfig will diff --git a/lib/bug.c b/lib/bug.c index 45a0584f6541..c223a2575b72 100644 --- a/lib/bug.c +++ b/lib/bug.c @@ -6,8 +6,7 @@ CONFIG_BUG - emit BUG traps. Nothing happens without this. CONFIG_GENERIC_BUG - enable this code. - CONFIG_GENERIC_BUG_RELATIVE_POINTERS - use 32-bit pointers relative to - the containing struct bug_entry for bug_addr and file. + CONFIG_GENERIC_BUG_RELATIVE_POINTERS - use 32-bit relative pointers for bug_addr and file CONFIG_DEBUG_BUGVERBOSE - emit full file+line information for each BUG CONFIG_BUG and CONFIG_DEBUG_BUGVERBOSE are potentially user-settable @@ -53,10 +52,10 @@ extern struct bug_entry __start___bug_table[], __stop___bug_table[]; static inline unsigned long bug_addr(const struct bug_entry *bug) { -#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS - return bug->bug_addr; +#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS + return (unsigned long)&bug->bug_addr_disp + bug->bug_addr_disp; #else - return (unsigned long)bug + bug->bug_addr_disp; + return bug->bug_addr; #endif } @@ -131,10 +130,10 @@ void bug_get_file_line(struct bug_entry *bug, const char **file, unsigned int *line) { #ifdef CONFIG_DEBUG_BUGVERBOSE -#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS - *file = bug->file; +#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS + *file = (const char *)&bug->file_disp + bug->file_disp; #else - *file = (const char *)bug + bug->file_disp; + *file = bug->file; #endif *line = bug->line; #else diff --git a/lib/crc-itu-t.c b/lib/crc-itu-t.c index 1974b355c148..1d26a1647da5 100644 --- a/lib/crc-itu-t.c +++ b/lib/crc-itu-t.c @@ -7,7 +7,7 @@ #include <linux/module.h> #include <linux/crc-itu-t.h> -/** CRC table for the CRC ITU-T V.41 0x1021 (x^16 + x^12 + x^15 + 1) */ +/* CRC table for the CRC ITU-T V.41 0x1021 (x^16 + x^12 + x^5 + 1) */ const u16 crc_itu_t_table[256] = { 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index 379a66d7f504..2082af43d51f 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -71,6 +71,7 @@ config CRYPTO_LIB_CURVE25519 tristate "Curve25519 scalar multiplication library" depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519 select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n + select LIB_MEMNEQ help Enable the Curve25519 library interface. 
This interface may be fulfilled by either the generic implementation or an arch-specific @@ -123,10 +124,4 @@ config CRYPTO_LIB_CHACHA20POLY1305 config CRYPTO_LIB_SHA256 tristate -config CRYPTO_LIB_SM3 - tristate - -config CRYPTO_LIB_SM4 - tristate - endmenu diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index 6c872d05d1e6..26be2bbe09c5 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -37,12 +37,6 @@ libpoly1305-y += poly1305.o obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o libsha256-y := sha256.o -obj-$(CONFIG_CRYPTO_LIB_SM3) += libsm3.o -libsm3-y := sm3.o - -obj-$(CONFIG_CRYPTO_LIB_SM4) += libsm4.o -libsm4-y := sm4.o - ifneq ($(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS),y) libblake2s-y += blake2s-selftest.o libchacha20poly1305-y += chacha20poly1305-selftest.o diff --git a/lib/crypto/sm3.c b/lib/crypto/sm3.c deleted file mode 100644 index d473e358a873..000000000000 --- a/lib/crypto/sm3.c +++ /dev/null @@ -1,246 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * SM3 secure hash, as specified by OSCCA GM/T 0004-2012 SM3 and described - * at https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02 - * - * Copyright (C) 2017 ARM Limited or its affiliates. - * Copyright (C) 2017 Gilad Ben-Yossef <gilad@benyossef.com> - * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com> - */ - -#include <linux/module.h> -#include <asm/unaligned.h> -#include <crypto/sm3.h> - -static const u32 ____cacheline_aligned K[64] = { - 0x79cc4519, 0xf3988a32, 0xe7311465, 0xce6228cb, - 0x9cc45197, 0x3988a32f, 0x7311465e, 0xe6228cbc, - 0xcc451979, 0x988a32f3, 0x311465e7, 0x6228cbce, - 0xc451979c, 0x88a32f39, 0x11465e73, 0x228cbce6, - 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c, - 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce, - 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec, - 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5, - 0x7a879d8a, 0xf50f3b14, 0xea1e7629, 0xd43cec53, - 0xa879d8a7, 0x50f3b14f, 0xa1e7629e, 0x43cec53d, - 0x879d8a7a, 0x0f3b14f5, 0x1e7629ea, 0x3cec53d4, - 0x79d8a7a8, 0xf3b14f50, 0xe7629ea1, 0xcec53d43, - 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c, - 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce, - 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec, - 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5 -}; - -/* - * Transform the message X which consists of 16 32-bit-words. See - * GM/T 004-2012 for details. 
- */ -#define R(i, a, b, c, d, e, f, g, h, t, w1, w2) \ - do { \ - ss1 = rol32((rol32((a), 12) + (e) + (t)), 7); \ - ss2 = ss1 ^ rol32((a), 12); \ - d += FF ## i(a, b, c) + ss2 + ((w1) ^ (w2)); \ - h += GG ## i(e, f, g) + ss1 + (w1); \ - b = rol32((b), 9); \ - f = rol32((f), 19); \ - h = P0((h)); \ - } while (0) - -#define R1(a, b, c, d, e, f, g, h, t, w1, w2) \ - R(1, a, b, c, d, e, f, g, h, t, w1, w2) -#define R2(a, b, c, d, e, f, g, h, t, w1, w2) \ - R(2, a, b, c, d, e, f, g, h, t, w1, w2) - -#define FF1(x, y, z) (x ^ y ^ z) -#define FF2(x, y, z) ((x & y) | (x & z) | (y & z)) - -#define GG1(x, y, z) FF1(x, y, z) -#define GG2(x, y, z) ((x & y) | (~x & z)) - -/* Message expansion */ -#define P0(x) ((x) ^ rol32((x), 9) ^ rol32((x), 17)) -#define P1(x) ((x) ^ rol32((x), 15) ^ rol32((x), 23)) -#define I(i) (W[i] = get_unaligned_be32(data + i * 4)) -#define W1(i) (W[i & 0x0f]) -#define W2(i) (W[i & 0x0f] = \ - P1(W[i & 0x0f] \ - ^ W[(i-9) & 0x0f] \ - ^ rol32(W[(i-3) & 0x0f], 15)) \ - ^ rol32(W[(i-13) & 0x0f], 7) \ - ^ W[(i-6) & 0x0f]) - -static void sm3_transform(struct sm3_state *sctx, u8 const *data, u32 W[16]) -{ - u32 a, b, c, d, e, f, g, h, ss1, ss2; - - a = sctx->state[0]; - b = sctx->state[1]; - c = sctx->state[2]; - d = sctx->state[3]; - e = sctx->state[4]; - f = sctx->state[5]; - g = sctx->state[6]; - h = sctx->state[7]; - - R1(a, b, c, d, e, f, g, h, K[0], I(0), I(4)); - R1(d, a, b, c, h, e, f, g, K[1], I(1), I(5)); - R1(c, d, a, b, g, h, e, f, K[2], I(2), I(6)); - R1(b, c, d, a, f, g, h, e, K[3], I(3), I(7)); - R1(a, b, c, d, e, f, g, h, K[4], W1(4), I(8)); - R1(d, a, b, c, h, e, f, g, K[5], W1(5), I(9)); - R1(c, d, a, b, g, h, e, f, K[6], W1(6), I(10)); - R1(b, c, d, a, f, g, h, e, K[7], W1(7), I(11)); - R1(a, b, c, d, e, f, g, h, K[8], W1(8), I(12)); - R1(d, a, b, c, h, e, f, g, K[9], W1(9), I(13)); - R1(c, d, a, b, g, h, e, f, K[10], W1(10), I(14)); - R1(b, c, d, a, f, g, h, e, K[11], W1(11), I(15)); - R1(a, b, c, d, e, f, g, h, K[12], W1(12), W2(16)); - R1(d, a, b, c, h, e, f, g, K[13], W1(13), W2(17)); - R1(c, d, a, b, g, h, e, f, K[14], W1(14), W2(18)); - R1(b, c, d, a, f, g, h, e, K[15], W1(15), W2(19)); - - R2(a, b, c, d, e, f, g, h, K[16], W1(16), W2(20)); - R2(d, a, b, c, h, e, f, g, K[17], W1(17), W2(21)); - R2(c, d, a, b, g, h, e, f, K[18], W1(18), W2(22)); - R2(b, c, d, a, f, g, h, e, K[19], W1(19), W2(23)); - R2(a, b, c, d, e, f, g, h, K[20], W1(20), W2(24)); - R2(d, a, b, c, h, e, f, g, K[21], W1(21), W2(25)); - R2(c, d, a, b, g, h, e, f, K[22], W1(22), W2(26)); - R2(b, c, d, a, f, g, h, e, K[23], W1(23), W2(27)); - R2(a, b, c, d, e, f, g, h, K[24], W1(24), W2(28)); - R2(d, a, b, c, h, e, f, g, K[25], W1(25), W2(29)); - R2(c, d, a, b, g, h, e, f, K[26], W1(26), W2(30)); - R2(b, c, d, a, f, g, h, e, K[27], W1(27), W2(31)); - R2(a, b, c, d, e, f, g, h, K[28], W1(28), W2(32)); - R2(d, a, b, c, h, e, f, g, K[29], W1(29), W2(33)); - R2(c, d, a, b, g, h, e, f, K[30], W1(30), W2(34)); - R2(b, c, d, a, f, g, h, e, K[31], W1(31), W2(35)); - - R2(a, b, c, d, e, f, g, h, K[32], W1(32), W2(36)); - R2(d, a, b, c, h, e, f, g, K[33], W1(33), W2(37)); - R2(c, d, a, b, g, h, e, f, K[34], W1(34), W2(38)); - R2(b, c, d, a, f, g, h, e, K[35], W1(35), W2(39)); - R2(a, b, c, d, e, f, g, h, K[36], W1(36), W2(40)); - R2(d, a, b, c, h, e, f, g, K[37], W1(37), W2(41)); - R2(c, d, a, b, g, h, e, f, K[38], W1(38), W2(42)); - R2(b, c, d, a, f, g, h, e, K[39], W1(39), W2(43)); - R2(a, b, c, d, e, f, g, h, K[40], W1(40), W2(44)); - R2(d, a, b, c, h, e, f, g, K[41], W1(41), W2(45)); - R2(c, d, a, 
b, g, h, e, f, K[42], W1(42), W2(46)); - R2(b, c, d, a, f, g, h, e, K[43], W1(43), W2(47)); - R2(a, b, c, d, e, f, g, h, K[44], W1(44), W2(48)); - R2(d, a, b, c, h, e, f, g, K[45], W1(45), W2(49)); - R2(c, d, a, b, g, h, e, f, K[46], W1(46), W2(50)); - R2(b, c, d, a, f, g, h, e, K[47], W1(47), W2(51)); - - R2(a, b, c, d, e, f, g, h, K[48], W1(48), W2(52)); - R2(d, a, b, c, h, e, f, g, K[49], W1(49), W2(53)); - R2(c, d, a, b, g, h, e, f, K[50], W1(50), W2(54)); - R2(b, c, d, a, f, g, h, e, K[51], W1(51), W2(55)); - R2(a, b, c, d, e, f, g, h, K[52], W1(52), W2(56)); - R2(d, a, b, c, h, e, f, g, K[53], W1(53), W2(57)); - R2(c, d, a, b, g, h, e, f, K[54], W1(54), W2(58)); - R2(b, c, d, a, f, g, h, e, K[55], W1(55), W2(59)); - R2(a, b, c, d, e, f, g, h, K[56], W1(56), W2(60)); - R2(d, a, b, c, h, e, f, g, K[57], W1(57), W2(61)); - R2(c, d, a, b, g, h, e, f, K[58], W1(58), W2(62)); - R2(b, c, d, a, f, g, h, e, K[59], W1(59), W2(63)); - R2(a, b, c, d, e, f, g, h, K[60], W1(60), W2(64)); - R2(d, a, b, c, h, e, f, g, K[61], W1(61), W2(65)); - R2(c, d, a, b, g, h, e, f, K[62], W1(62), W2(66)); - R2(b, c, d, a, f, g, h, e, K[63], W1(63), W2(67)); - - sctx->state[0] ^= a; - sctx->state[1] ^= b; - sctx->state[2] ^= c; - sctx->state[3] ^= d; - sctx->state[4] ^= e; - sctx->state[5] ^= f; - sctx->state[6] ^= g; - sctx->state[7] ^= h; -} -#undef R -#undef R1 -#undef R2 -#undef I -#undef W1 -#undef W2 - -static inline void sm3_block(struct sm3_state *sctx, - u8 const *data, int blocks, u32 W[16]) -{ - while (blocks--) { - sm3_transform(sctx, data, W); - data += SM3_BLOCK_SIZE; - } -} - -void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len) -{ - unsigned int partial = sctx->count % SM3_BLOCK_SIZE; - u32 W[16]; - - sctx->count += len; - - if ((partial + len) >= SM3_BLOCK_SIZE) { - int blocks; - - if (partial) { - int p = SM3_BLOCK_SIZE - partial; - - memcpy(sctx->buffer + partial, data, p); - data += p; - len -= p; - - sm3_block(sctx, sctx->buffer, 1, W); - } - - blocks = len / SM3_BLOCK_SIZE; - len %= SM3_BLOCK_SIZE; - - if (blocks) { - sm3_block(sctx, data, blocks, W); - data += blocks * SM3_BLOCK_SIZE; - } - - memzero_explicit(W, sizeof(W)); - - partial = 0; - } - if (len) - memcpy(sctx->buffer + partial, data, len); -} -EXPORT_SYMBOL_GPL(sm3_update); - -void sm3_final(struct sm3_state *sctx, u8 *out) -{ - const int bit_offset = SM3_BLOCK_SIZE - sizeof(u64); - __be64 *bits = (__be64 *)(sctx->buffer + bit_offset); - __be32 *digest = (__be32 *)out; - unsigned int partial = sctx->count % SM3_BLOCK_SIZE; - u32 W[16]; - int i; - - sctx->buffer[partial++] = 0x80; - if (partial > bit_offset) { - memset(sctx->buffer + partial, 0, SM3_BLOCK_SIZE - partial); - partial = 0; - - sm3_block(sctx, sctx->buffer, 1, W); - } - - memset(sctx->buffer + partial, 0, bit_offset - partial); - *bits = cpu_to_be64(sctx->count << 3); - sm3_block(sctx, sctx->buffer, 1, W); - - for (i = 0; i < 8; i++) - put_unaligned_be32(sctx->state[i], digest++); - - /* Zeroize sensitive information. 
*/ - memzero_explicit(W, sizeof(W)); - memzero_explicit(sctx, sizeof(*sctx)); -} -EXPORT_SYMBOL_GPL(sm3_final); - -MODULE_DESCRIPTION("Generic SM3 library"); -MODULE_LICENSE("GPL v2"); diff --git a/lib/crypto/sm4.c b/lib/crypto/sm4.c deleted file mode 100644 index 284e62576d0c..000000000000 --- a/lib/crypto/sm4.c +++ /dev/null @@ -1,176 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * SM4, as specified in - * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html - * - * Copyright (C) 2018 ARM Limited or its affiliates. - * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com> - */ - -#include <linux/module.h> -#include <asm/unaligned.h> -#include <crypto/sm4.h> - -static const u32 fk[4] = { - 0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc -}; - -static const u32 ____cacheline_aligned ck[32] = { - 0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269, - 0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9, - 0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249, - 0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9, - 0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229, - 0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299, - 0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209, - 0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279 -}; - -static const u8 ____cacheline_aligned sbox[256] = { - 0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7, - 0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05, - 0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3, - 0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99, - 0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a, - 0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62, - 0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95, - 0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6, - 0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba, - 0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8, - 0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b, - 0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35, - 0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2, - 0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87, - 0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52, - 0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e, - 0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5, - 0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1, - 0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55, - 0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3, - 0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60, - 0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f, - 0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f, - 0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51, - 0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f, - 0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8, - 0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd, - 0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0, - 0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e, - 0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84, - 0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20, - 0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48 -}; - -static inline u32 sm4_t_non_lin_sub(u32 x) -{ - u32 out; - - out = (u32)sbox[x & 0xff]; - out |= (u32)sbox[(x >> 8) & 0xff] << 8; - out |= (u32)sbox[(x >> 16) & 0xff] << 16; - out |= (u32)sbox[(x >> 24) & 0xff] << 24; - - return out; -} - -static inline u32 sm4_key_lin_sub(u32 x) -{ - return x ^ rol32(x, 13) ^ rol32(x, 23); -} - -static inline u32 sm4_enc_lin_sub(u32 x) -{ - return x ^ rol32(x, 2) ^ rol32(x, 10) ^ rol32(x, 18) ^ rol32(x, 24); -} - -static inline u32 sm4_key_sub(u32 x) -{ - return sm4_key_lin_sub(sm4_t_non_lin_sub(x)); -} - -static inline u32 sm4_enc_sub(u32 x) -{ - return sm4_enc_lin_sub(sm4_t_non_lin_sub(x)); -} - -static 
inline u32 sm4_round(u32 x0, u32 x1, u32 x2, u32 x3, u32 rk) -{ - return x0 ^ sm4_enc_sub(x1 ^ x2 ^ x3 ^ rk); -} - - -/** - * sm4_expandkey - Expands the SM4 key as described in GB/T 32907-2016 - * @ctx: The location where the computed key will be stored. - * @in_key: The supplied key. - * @key_len: The length of the supplied key. - * - * Returns 0 on success. The function fails only if an invalid key size (or - * pointer) is supplied. - */ -int sm4_expandkey(struct sm4_ctx *ctx, const u8 *in_key, - unsigned int key_len) -{ - u32 rk[4]; - const u32 *key = (u32 *)in_key; - int i; - - if (key_len != SM4_KEY_SIZE) - return -EINVAL; - - rk[0] = get_unaligned_be32(&key[0]) ^ fk[0]; - rk[1] = get_unaligned_be32(&key[1]) ^ fk[1]; - rk[2] = get_unaligned_be32(&key[2]) ^ fk[2]; - rk[3] = get_unaligned_be32(&key[3]) ^ fk[3]; - - for (i = 0; i < 32; i += 4) { - rk[0] ^= sm4_key_sub(rk[1] ^ rk[2] ^ rk[3] ^ ck[i + 0]); - rk[1] ^= sm4_key_sub(rk[2] ^ rk[3] ^ rk[0] ^ ck[i + 1]); - rk[2] ^= sm4_key_sub(rk[3] ^ rk[0] ^ rk[1] ^ ck[i + 2]); - rk[3] ^= sm4_key_sub(rk[0] ^ rk[1] ^ rk[2] ^ ck[i + 3]); - - ctx->rkey_enc[i + 0] = rk[0]; - ctx->rkey_enc[i + 1] = rk[1]; - ctx->rkey_enc[i + 2] = rk[2]; - ctx->rkey_enc[i + 3] = rk[3]; - ctx->rkey_dec[31 - 0 - i] = rk[0]; - ctx->rkey_dec[31 - 1 - i] = rk[1]; - ctx->rkey_dec[31 - 2 - i] = rk[2]; - ctx->rkey_dec[31 - 3 - i] = rk[3]; - } - - return 0; -} -EXPORT_SYMBOL_GPL(sm4_expandkey); - -/** - * sm4_crypt_block - Encrypt or decrypt a single SM4 block - * @rk: The rkey_enc for encrypt or rkey_dec for decrypt - * @out: Buffer to store output data - * @in: Buffer containing the input data - */ -void sm4_crypt_block(const u32 *rk, u8 *out, const u8 *in) -{ - u32 x[4], i; - - x[0] = get_unaligned_be32(in + 0 * 4); - x[1] = get_unaligned_be32(in + 1 * 4); - x[2] = get_unaligned_be32(in + 2 * 4); - x[3] = get_unaligned_be32(in + 3 * 4); - - for (i = 0; i < 32; i += 4) { - x[0] = sm4_round(x[0], x[1], x[2], x[3], rk[i + 0]); - x[1] = sm4_round(x[1], x[2], x[3], x[0], rk[i + 1]); - x[2] = sm4_round(x[2], x[3], x[0], x[1], rk[i + 2]); - x[3] = sm4_round(x[3], x[0], x[1], x[2], rk[i + 3]); - } - - put_unaligned_be32(x[3 - 0], out + 0 * 4); - put_unaligned_be32(x[3 - 1], out + 1 * 4); - put_unaligned_be32(x[3 - 2], out + 2 * 4); - put_unaligned_be32(x[3 - 3], out + 3 * 4); -} -EXPORT_SYMBOL_GPL(sm4_crypt_block); - -MODULE_DESCRIPTION("Generic SM4 library"); -MODULE_LICENSE("GPL v2"); diff --git a/lib/debugobjects.c b/lib/debugobjects.c index 6946f8e204e3..337d797a7141 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -1,11 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Generic infrastructure for lifetime debugging of objects. 
* - * Started by Thomas Gleixner - * * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de> - * - * For licencing details see kernel-base/COPYING */ #define pr_fmt(fmt) "ODEBUG: " fmt diff --git a/lib/dump_stack.c b/lib/dump_stack.c index 6b7f1bf6715d..83471e81501a 100644 --- a/lib/dump_stack.c +++ b/lib/dump_stack.c @@ -102,9 +102,9 @@ asmlinkage __visible void dump_stack_lvl(const char *log_lvl) * Permit this cpu to perform nested stack dumps while serialising * against other CPUs */ - printk_cpu_lock_irqsave(flags); + printk_cpu_sync_get_irqsave(flags); __dump_stack(log_lvl); - printk_cpu_unlock_irqrestore(flags); + printk_cpu_sync_put_irqrestore(flags); } EXPORT_SYMBOL(dump_stack_lvl); diff --git a/lib/fault-inject.c b/lib/fault-inject.c index ce12621b4275..423784d9c058 100644 --- a/lib/fault-inject.c +++ b/lib/fault-inject.c @@ -41,6 +41,9 @@ EXPORT_SYMBOL_GPL(setup_fault_attr); static void fail_dump(struct fault_attr *attr) { + if (attr->no_warn) + return; + if (attr->verbose > 0 && __ratelimit(&attr->ratelimit_state)) { printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n" "name %pd, interval %lu, probability %lu, " diff --git a/lib/glob.c b/lib/glob.c index 85ecbda45cd8..15b73f490720 100644 --- a/lib/glob.c +++ b/lib/glob.c @@ -45,7 +45,7 @@ bool __pure glob_match(char const *pat, char const *str) * (no exception for /), it can be easily proved that there's * never a need to backtrack multiple levels. */ - char const *back_pat = NULL, *back_str = back_str; + char const *back_pat = NULL, *back_str; /* * Loop over each token (character or class) in pat, matching diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 6dd5330f7a99..0b64695ab632 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -1434,7 +1434,7 @@ static ssize_t iter_xarray_get_pages(struct iov_iter *i, { unsigned nr, offset; pgoff_t index, count; - size_t size = maxsize, actual; + size_t size = maxsize; loff_t pos; if (!size || !maxpages) @@ -1461,13 +1461,7 @@ static ssize_t iter_xarray_get_pages(struct iov_iter *i, if (nr == 0) return 0; - actual = PAGE_SIZE * nr; - actual -= offset; - if (nr == count && size > 0) { - unsigned last_offset = (nr > 1) ? 0 : offset; - actual -= PAGE_SIZE - (last_offset + size); - } - return actual; + return min_t(size_t, nr * PAGE_SIZE - offset, maxsize); } /* must be done on non-empty ITER_IOVEC one */ @@ -1602,7 +1596,7 @@ static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i, struct page **p; unsigned nr, offset; pgoff_t index, count; - size_t size = maxsize, actual; + size_t size = maxsize; loff_t pos; if (!size) @@ -1631,13 +1625,7 @@ static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i, if (nr == 0) return 0; - actual = PAGE_SIZE * nr; - actual -= offset; - if (nr == count && size > 0) { - unsigned last_offset = (nr > 1) ? 0 : offset; - actual -= PAGE_SIZE - (last_offset + size); - } - return actual; + return min_t(size_t, nr * PAGE_SIZE - offset, maxsize); } ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, diff --git a/lib/irq_poll.c b/lib/irq_poll.c index 2f17b488d58e..2d5329a42105 100644 --- a/lib/irq_poll.c +++ b/lib/irq_poll.c @@ -188,14 +188,18 @@ EXPORT_SYMBOL(irq_poll_init); static int irq_poll_cpu_dead(unsigned int cpu) { /* - * If a CPU goes away, splice its entries to the current CPU - * and trigger a run of the softirq + * If a CPU goes away, splice its entries to the current CPU and + * set the POLL softirq bit. The local_bh_disable()/enable() pair + * ensures that it is handled. 
Otherwise the current CPU could + * reach idle with the POLL softirq pending. */ + local_bh_disable(); local_irq_disable(); list_splice_init(&per_cpu(blk_cpu_iopoll, cpu), this_cpu_ptr(&blk_cpu_iopoll)); __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); local_irq_enable(); + local_bh_enable(); return 0; } diff --git a/lib/kstrtox.c b/lib/kstrtox.c index 886510d248e5..08c14019841a 100644 --- a/lib/kstrtox.c +++ b/lib/kstrtox.c @@ -340,7 +340,7 @@ EXPORT_SYMBOL(kstrtos8); * @s: input string * @res: result * - * This routine returns 0 iff the first character is one of 'Yy1Nn0', or + * This routine returns 0 iff the first character is one of 'YyTt1NnFf0', or * [oO][NnFf] for "on" and "off". Otherwise it will return -EINVAL. Value * pointed to by res is updated upon finding a match. */ @@ -353,11 +353,15 @@ int kstrtobool(const char *s, bool *res) switch (s[0]) { case 'y': case 'Y': + case 't': + case 'T': case '1': *res = true; return 0; case 'n': case 'N': + case 'f': + case 'F': case '0': *res = false; return 0; diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile index c49f4ffb6273..29aff6562b42 100644 --- a/lib/kunit/Makefile +++ b/lib/kunit/Makefile @@ -1,6 +1,7 @@ obj-$(CONFIG_KUNIT) += kunit.o kunit-objs += test.o \ + resource.o \ string-stream.o \ assert.o \ try-catch.o \ diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c index b71db0abc12b..1048ef1b8d6e 100644 --- a/lib/kunit/debugfs.c +++ b/lib/kunit/debugfs.c @@ -52,7 +52,7 @@ static void debugfs_print_result(struct seq_file *seq, static int debugfs_print_results(struct seq_file *seq, void *v) { struct kunit_suite *suite = (struct kunit_suite *)seq->private; - bool success = kunit_suite_has_succeeded(suite); + enum kunit_status success = kunit_suite_has_succeeded(suite); struct kunit_case *test_case; if (!suite || !suite->log) diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c index 22640c9ee819..96f96e42ce06 100644 --- a/lib/kunit/executor.c +++ b/lib/kunit/executor.c @@ -71,9 +71,13 @@ kunit_filter_tests(struct kunit_suite *const suite, const char *test_glob) /* Use memcpy to workaround copy->name being const. 
*/ copy = kmalloc(sizeof(*copy), GFP_KERNEL); + if (!copy) + return ERR_PTR(-ENOMEM); memcpy(copy, suite, sizeof(*copy)); filtered = kcalloc(n + 1, sizeof(*filtered), GFP_KERNEL); + if (!filtered) + return ERR_PTR(-ENOMEM); n = 0; kunit_suite_for_each_test_case(suite, test_case) { @@ -106,14 +110,16 @@ kunit_filter_subsuite(struct kunit_suite * const * const subsuite, filtered = kmalloc_array(n + 1, sizeof(*filtered), GFP_KERNEL); if (!filtered) - return NULL; + return ERR_PTR(-ENOMEM); n = 0; for (i = 0; subsuite[i] != NULL; ++i) { if (!glob_match(filter->suite_glob, subsuite[i]->name)) continue; filtered_suite = kunit_filter_tests(subsuite[i], filter->test_glob); - if (filtered_suite) + if (IS_ERR(filtered_suite)) + return ERR_CAST(filtered_suite); + else if (filtered_suite) filtered[n++] = filtered_suite; } filtered[n] = NULL; @@ -146,7 +152,8 @@ static void kunit_free_suite_set(struct suite_set suite_set) } static struct suite_set kunit_filter_suites(const struct suite_set *suite_set, - const char *filter_glob) + const char *filter_glob, + int *err) { int i; struct kunit_suite * const **copy, * const *filtered_subsuite; @@ -166,6 +173,10 @@ static struct suite_set kunit_filter_suites(const struct suite_set *suite_set, for (i = 0; i < max; ++i) { filtered_subsuite = kunit_filter_subsuite(suite_set->start[i], &filter); + if (IS_ERR(filtered_subsuite)) { + *err = PTR_ERR(filtered_subsuite); + return filtered; + } if (filtered_subsuite) *copy++ = filtered_subsuite; } @@ -236,9 +247,15 @@ int kunit_run_all_tests(void) .start = __kunit_suites_start, .end = __kunit_suites_end, }; + int err = 0; - if (filter_glob_param) - suite_set = kunit_filter_suites(&suite_set, filter_glob_param); + if (filter_glob_param) { + suite_set = kunit_filter_suites(&suite_set, filter_glob_param, &err); + if (err) { + pr_err("kunit executor: error filtering suites: %d\n", err); + goto out; + } + } if (!action_param) kunit_exec_run_tests(&suite_set); @@ -251,9 +268,10 @@ int kunit_run_all_tests(void) kunit_free_suite_set(suite_set); } - kunit_handle_shutdown(); - return 0; +out: + kunit_handle_shutdown(); + return err; } #if IS_BUILTIN(CONFIG_KUNIT_TEST) diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c index 4ed57fd94e42..eac6ff480273 100644 --- a/lib/kunit/executor_test.c +++ b/lib/kunit/executor_test.c @@ -137,14 +137,16 @@ static void filter_suites_test(struct kunit *test) .end = suites + 2, }; struct suite_set filtered = {.start = NULL, .end = NULL}; + int err = 0; /* Emulate two files, each having one suite */ subsuites[0][0] = alloc_fake_suite(test, "suite0", dummy_test_cases); subsuites[1][0] = alloc_fake_suite(test, "suite1", dummy_test_cases); /* Filter out suite1 */ - filtered = kunit_filter_suites(&suite_set, "suite0"); + filtered = kunit_filter_suites(&suite_set, "suite0", &err); kfree_subsuites_at_end(test, &filtered); /* let us use ASSERTs without leaking */ + KUNIT_EXPECT_EQ(test, err, 0); KUNIT_ASSERT_EQ(test, filtered.end - filtered.start, (ptrdiff_t)1); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start); diff --git a/lib/kunit/kunit-example-test.c b/lib/kunit/kunit-example-test.c index 4bbf37c04eba..f8fe582c9e36 100644 --- a/lib/kunit/kunit-example-test.c +++ b/lib/kunit/kunit-example-test.c @@ -41,6 +41,17 @@ static int example_test_init(struct kunit *test) } /* + * This is run once before all test cases in the suite. + * See the comment on example_test_suite for more information. 
+ */ +static int example_test_init_suite(struct kunit_suite *suite) +{ + kunit_info(suite, "initializing suite\n"); + + return 0; +} + +/* * This test should always be skipped. */ static void example_skip_test(struct kunit *test) @@ -91,6 +102,8 @@ static void example_all_expect_macros_test(struct kunit *test) KUNIT_EXPECT_NOT_ERR_OR_NULL(test, test); KUNIT_EXPECT_PTR_EQ(test, NULL, NULL); KUNIT_EXPECT_PTR_NE(test, test, NULL); + KUNIT_EXPECT_NULL(test, NULL); + KUNIT_EXPECT_NOT_NULL(test, test); /* String assertions */ KUNIT_EXPECT_STREQ(test, "hi", "hi"); @@ -140,17 +153,20 @@ static struct kunit_case example_test_cases[] = { * may be specified which runs after every test case and can be used to for * cleanup. For clarity, running tests in a test suite would behave as follows: * + * suite.suite_init(suite); * suite.init(test); * suite.test_case[0](test); * suite.exit(test); * suite.init(test); * suite.test_case[1](test); * suite.exit(test); + * suite.suite_exit(suite); * ...; */ static struct kunit_suite example_test_suite = { .name = "example", .init = example_test_init, + .suite_init = example_test_init_suite, .test_cases = example_test_cases, }; diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c index 555601d17f79..13d0bd8b07a9 100644 --- a/lib/kunit/kunit-test.c +++ b/lib/kunit/kunit-test.c @@ -190,6 +190,40 @@ static void kunit_resource_test_destroy_resource(struct kunit *test) KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources)); } +static void kunit_resource_test_remove_resource(struct kunit *test) +{ + struct kunit_test_resource_context *ctx = test->priv; + struct kunit_resource *res = kunit_alloc_and_get_resource( + &ctx->test, + fake_resource_init, + fake_resource_free, + GFP_KERNEL, + ctx); + + /* The resource is in the list */ + KUNIT_EXPECT_FALSE(test, list_empty(&ctx->test.resources)); + + /* Remove the resource. The pointer is still valid, but it can't be + * found. + */ + kunit_remove_resource(test, res); + KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources)); + /* We haven't been freed yet. */ + KUNIT_EXPECT_TRUE(test, ctx->is_resource_initialized); + + /* Removing the resource multiple times is valid. */ + kunit_remove_resource(test, res); + KUNIT_EXPECT_TRUE(test, list_empty(&ctx->test.resources)); + /* Despite having been removed twice (from only one reference), the + * resource still has not been freed. + */ + KUNIT_EXPECT_TRUE(test, ctx->is_resource_initialized); + + /* Free the resource. 
*/ + kunit_put_resource(res); + KUNIT_EXPECT_FALSE(test, ctx->is_resource_initialized); +} + static void kunit_resource_test_cleanup_resources(struct kunit *test) { int i; @@ -387,6 +421,7 @@ static struct kunit_case kunit_resource_test_cases[] = { KUNIT_CASE(kunit_resource_test_init_resources), KUNIT_CASE(kunit_resource_test_alloc_resource), KUNIT_CASE(kunit_resource_test_destroy_resource), + KUNIT_CASE(kunit_resource_test_remove_resource), KUNIT_CASE(kunit_resource_test_cleanup_resources), KUNIT_CASE(kunit_resource_test_proper_free_ordering), KUNIT_CASE(kunit_resource_test_static), @@ -435,7 +470,7 @@ static void kunit_log_test(struct kunit *test) KUNIT_EXPECT_NOT_ERR_OR_NULL(test, strstr(suite.log, "along with this.")); #else - KUNIT_EXPECT_PTR_EQ(test, test->log, (char *)NULL); + KUNIT_EXPECT_NULL(test, test->log); #endif } diff --git a/lib/kunit/resource.c b/lib/kunit/resource.c new file mode 100644 index 000000000000..c414df922f34 --- /dev/null +++ b/lib/kunit/resource.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * KUnit resource API for test managed resources (allocations, etc.). + * + * Copyright (C) 2022, Google LLC. + * Author: Daniel Latypov <dlatypov@google.com> + */ + +#include <kunit/resource.h> +#include <kunit/test.h> +#include <linux/kref.h> + +/* + * Used for static resources and when a kunit_resource * has been created by + * kunit_alloc_resource(). When an init function is supplied, @data is passed + * into the init function; otherwise, we simply set the resource data field to + * the data value passed in. Doesn't initialize res->should_kfree. + */ +int __kunit_add_resource(struct kunit *test, + kunit_resource_init_t init, + kunit_resource_free_t free, + struct kunit_resource *res, + void *data) +{ + int ret = 0; + unsigned long flags; + + res->free = free; + kref_init(&res->refcount); + + if (init) { + ret = init(res, data); + if (ret) + return ret; + } else { + res->data = data; + } + + spin_lock_irqsave(&test->lock, flags); + list_add_tail(&res->node, &test->resources); + /* refcount for list is established by kref_init() */ + spin_unlock_irqrestore(&test->lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(__kunit_add_resource); + +void kunit_remove_resource(struct kunit *test, struct kunit_resource *res) +{ + unsigned long flags; + bool was_linked; + + spin_lock_irqsave(&test->lock, flags); + was_linked = !list_empty(&res->node); + list_del_init(&res->node); + spin_unlock_irqrestore(&test->lock, flags); + + if (was_linked) + kunit_put_resource(res); +} +EXPORT_SYMBOL_GPL(kunit_remove_resource); + +int kunit_destroy_resource(struct kunit *test, kunit_resource_match_t match, + void *match_data) +{ + struct kunit_resource *res = kunit_find_resource(test, match, + match_data); + + if (!res) + return -ENOENT; + + kunit_remove_resource(test, res); + + /* We have a reference also via _find(); drop it. 
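kunit_destroy_resource() above follows the lookup rule spelled out in that comment: kunit_find_resource() returns with an extra reference held, so every successful find must be balanced by kunit_put_resource(). A minimal sketch, with a hypothetical match_fn/match_data pair:

	struct kunit_resource *res;

	res = kunit_find_resource(test, match_fn, match_data);	/* takes a reference on success */
	if (res) {
		/* ... use res->data ... */
		kunit_put_resource(res);	/* drop the reference taken by _find() */
	}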
*/ + kunit_put_resource(res); + + return 0; +} +EXPORT_SYMBOL_GPL(kunit_destroy_resource); diff --git a/lib/kunit/test.c b/lib/kunit/test.c index 3bca3bf5c15b..a5053a07409f 100644 --- a/lib/kunit/test.c +++ b/lib/kunit/test.c @@ -6,10 +6,10 @@ * Author: Brendan Higgins <brendanhiggins@google.com> */ +#include <kunit/resource.h> #include <kunit/test.h> #include <kunit/test-bug.h> #include <linux/kernel.h> -#include <linux/kref.h> #include <linux/moduleparam.h> #include <linux/sched/debug.h> #include <linux/sched.h> @@ -134,7 +134,7 @@ size_t kunit_suite_num_test_cases(struct kunit_suite *suite) } EXPORT_SYMBOL_GPL(kunit_suite_num_test_cases); -static void kunit_print_subtest_start(struct kunit_suite *suite) +static void kunit_print_suite_start(struct kunit_suite *suite) { kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "# Subtest: %s", suite->name); @@ -179,6 +179,9 @@ enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite) const struct kunit_case *test_case; enum kunit_status status = KUNIT_SKIPPED; + if (suite->suite_init_err) + return KUNIT_FAILURE; + kunit_suite_for_each_test_case(suite, test_case) { if (test_case->status == KUNIT_FAILURE) return KUNIT_FAILURE; @@ -192,7 +195,7 @@ EXPORT_SYMBOL_GPL(kunit_suite_has_succeeded); static size_t kunit_suite_counter = 1; -static void kunit_print_subtest_end(struct kunit_suite *suite) +static void kunit_print_suite_end(struct kunit_suite *suite) { kunit_print_ok_not_ok((void *)suite, false, kunit_suite_has_succeeded(suite), @@ -241,7 +244,7 @@ static void kunit_print_string_stream(struct kunit *test, } static void kunit_fail(struct kunit *test, const struct kunit_loc *loc, - enum kunit_assert_type type, struct kunit_assert *assert, + enum kunit_assert_type type, const struct kunit_assert *assert, const struct va_format *message) { struct string_stream *stream; @@ -281,7 +284,7 @@ static void __noreturn kunit_abort(struct kunit *test) void kunit_do_failed_assertion(struct kunit *test, const struct kunit_loc *loc, enum kunit_assert_type type, - struct kunit_assert *assert, + const struct kunit_assert *assert, const char *fmt, ...) { va_list args; @@ -498,7 +501,16 @@ int kunit_run_tests(struct kunit_suite *suite) struct kunit_result_stats suite_stats = { 0 }; struct kunit_result_stats total_stats = { 0 }; - kunit_print_subtest_start(suite); + if (suite->suite_init) { + suite->suite_init_err = suite->suite_init(suite); + if (suite->suite_init_err) { + kunit_err(suite, KUNIT_SUBTEST_INDENT + "# failed to initialize (%d)", suite->suite_init_err); + goto suite_end; + } + } + + kunit_print_suite_start(suite); kunit_suite_for_each_test_case(suite, test_case) { struct kunit test = { .param_value = NULL, .param_index = 0 }; @@ -551,8 +563,12 @@ int kunit_run_tests(struct kunit_suite *suite) kunit_accumulate_stats(&total_stats, param_stats); } + if (suite->suite_exit) + suite->suite_exit(suite); + kunit_print_suite_stats(suite, suite_stats, total_stats); - kunit_print_subtest_end(suite); +suite_end: + kunit_print_suite_end(suite); return 0; } @@ -562,6 +578,7 @@ static void kunit_init_suite(struct kunit_suite *suite) { kunit_debugfs_create_suite(suite); suite->status_comment[0] = '\0'; + suite->suite_init_err = 0; } int __kunit_test_suites_init(struct kunit_suite * const * const suites) @@ -592,120 +609,6 @@ void __kunit_test_suites_exit(struct kunit_suite **suites) } EXPORT_SYMBOL_GPL(__kunit_test_suites_exit); -/* - * Used for static resources and when a kunit_resource * has been created by - * kunit_alloc_resource(). 
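The block being removed here (and continuing below) moves the resource helpers out of test.c; they now live in lib/kunit/resource.c added above and keep backing the test-managed allocators. A minimal, illustrative test body using that machinery:

	static void example_alloc_test(struct kunit *test)
	{
		/* Freed automatically when the test finishes, via a kunit_resource. */
		u8 *buf = kunit_kzalloc(test, 64, GFP_KERNEL);

		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
		memset(buf, 0xa5, 64);
	}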
When an init function is supplied, @data is passed - * into the init function; otherwise, we simply set the resource data field to - * the data value passed in. - */ -int kunit_add_resource(struct kunit *test, - kunit_resource_init_t init, - kunit_resource_free_t free, - struct kunit_resource *res, - void *data) -{ - int ret = 0; - unsigned long flags; - - res->free = free; - kref_init(&res->refcount); - - if (init) { - ret = init(res, data); - if (ret) - return ret; - } else { - res->data = data; - } - - spin_lock_irqsave(&test->lock, flags); - list_add_tail(&res->node, &test->resources); - /* refcount for list is established by kref_init() */ - spin_unlock_irqrestore(&test->lock, flags); - - return ret; -} -EXPORT_SYMBOL_GPL(kunit_add_resource); - -int kunit_add_named_resource(struct kunit *test, - kunit_resource_init_t init, - kunit_resource_free_t free, - struct kunit_resource *res, - const char *name, - void *data) -{ - struct kunit_resource *existing; - - if (!name) - return -EINVAL; - - existing = kunit_find_named_resource(test, name); - if (existing) { - kunit_put_resource(existing); - return -EEXIST; - } - - res->name = name; - - return kunit_add_resource(test, init, free, res, data); -} -EXPORT_SYMBOL_GPL(kunit_add_named_resource); - -struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test, - kunit_resource_init_t init, - kunit_resource_free_t free, - gfp_t internal_gfp, - void *data) -{ - struct kunit_resource *res; - int ret; - - res = kzalloc(sizeof(*res), internal_gfp); - if (!res) - return NULL; - - ret = kunit_add_resource(test, init, free, res, data); - if (!ret) { - /* - * bump refcount for get; kunit_resource_put() should be called - * when done. - */ - kunit_get_resource(res); - return res; - } - return NULL; -} -EXPORT_SYMBOL_GPL(kunit_alloc_and_get_resource); - -void kunit_remove_resource(struct kunit *test, struct kunit_resource *res) -{ - unsigned long flags; - - spin_lock_irqsave(&test->lock, flags); - list_del(&res->node); - spin_unlock_irqrestore(&test->lock, flags); - kunit_put_resource(res); -} -EXPORT_SYMBOL_GPL(kunit_remove_resource); - -int kunit_destroy_resource(struct kunit *test, kunit_resource_match_t match, - void *match_data) -{ - struct kunit_resource *res = kunit_find_resource(test, match, - match_data); - - if (!res) - return -ENOENT; - - kunit_remove_resource(test, res); - - /* We have a reference also via _find(); drop it. */ - kunit_put_resource(res); - - return 0; -} -EXPORT_SYMBOL_GPL(kunit_destroy_resource); - struct kunit_kmalloc_array_params { size_t n; size_t size; diff --git a/lib/list-test.c b/lib/list-test.c index 035ef6597640..d374cf5d1a57 100644 --- a/lib/list-test.c +++ b/lib/list-test.c @@ -804,6 +804,401 @@ static struct kunit_suite list_test_module = { .test_cases = list_test_cases, }; -kunit_test_suites(&list_test_module); +struct hlist_test_struct { + int data; + struct hlist_node list; +}; + +static void hlist_test_init(struct kunit *test) +{ + /* Test the different ways of initialising a list. 
*/ + struct hlist_head list1 = HLIST_HEAD_INIT; + struct hlist_head list2; + HLIST_HEAD(list3); + struct hlist_head *list4; + struct hlist_head *list5; + + INIT_HLIST_HEAD(&list2); + + list4 = kzalloc(sizeof(*list4), GFP_KERNEL | __GFP_NOFAIL); + INIT_HLIST_HEAD(list4); + + list5 = kmalloc(sizeof(*list5), GFP_KERNEL | __GFP_NOFAIL); + memset(list5, 0xFF, sizeof(*list5)); + INIT_HLIST_HEAD(list5); + + KUNIT_EXPECT_TRUE(test, hlist_empty(&list1)); + KUNIT_EXPECT_TRUE(test, hlist_empty(&list2)); + KUNIT_EXPECT_TRUE(test, hlist_empty(&list3)); + KUNIT_EXPECT_TRUE(test, hlist_empty(list4)); + KUNIT_EXPECT_TRUE(test, hlist_empty(list5)); + + kfree(list4); + kfree(list5); +} + +static void hlist_test_unhashed(struct kunit *test) +{ + struct hlist_node a; + HLIST_HEAD(list); + + INIT_HLIST_NODE(&a); + + /* is unhashed by default */ + KUNIT_EXPECT_TRUE(test, hlist_unhashed(&a)); + + hlist_add_head(&a, &list); + + /* is hashed once added to list */ + KUNIT_EXPECT_FALSE(test, hlist_unhashed(&a)); + + hlist_del_init(&a); + + /* is again unhashed after del_init */ + KUNIT_EXPECT_TRUE(test, hlist_unhashed(&a)); +} + +/* Doesn't test concurrency guarantees */ +static void hlist_test_unhashed_lockless(struct kunit *test) +{ + struct hlist_node a; + HLIST_HEAD(list); + + INIT_HLIST_NODE(&a); + + /* is unhashed by default */ + KUNIT_EXPECT_TRUE(test, hlist_unhashed_lockless(&a)); + + hlist_add_head(&a, &list); + + /* is hashed once added to list */ + KUNIT_EXPECT_FALSE(test, hlist_unhashed_lockless(&a)); + + hlist_del_init(&a); + + /* is again unhashed after del_init */ + KUNIT_EXPECT_TRUE(test, hlist_unhashed_lockless(&a)); +} + +static void hlist_test_del(struct kunit *test) +{ + struct hlist_node a, b; + HLIST_HEAD(list); + + hlist_add_head(&a, &list); + hlist_add_behind(&b, &a); + + /* before: [list] -> a -> b */ + hlist_del(&a); + + /* now: [list] -> b */ + KUNIT_EXPECT_PTR_EQ(test, list.first, &b); + KUNIT_EXPECT_PTR_EQ(test, b.pprev, &list.first); +} + +static void hlist_test_del_init(struct kunit *test) +{ + struct hlist_node a, b; + HLIST_HEAD(list); + + hlist_add_head(&a, &list); + hlist_add_behind(&b, &a); + + /* before: [list] -> a -> b */ + hlist_del_init(&a); + + /* now: [list] -> b */ + KUNIT_EXPECT_PTR_EQ(test, list.first, &b); + KUNIT_EXPECT_PTR_EQ(test, b.pprev, &list.first); + + /* a is now initialised */ + KUNIT_EXPECT_PTR_EQ(test, a.next, NULL); + KUNIT_EXPECT_PTR_EQ(test, a.pprev, NULL); +} + +/* Tests all three hlist_add_* functions */ +static void hlist_test_add(struct kunit *test) +{ + struct hlist_node a, b, c, d; + HLIST_HEAD(list); + + hlist_add_head(&a, &list); + hlist_add_head(&b, &list); + hlist_add_before(&c, &a); + hlist_add_behind(&d, &a); + + /* should be [list] -> b -> c -> a -> d */ + KUNIT_EXPECT_PTR_EQ(test, list.first, &b); + + KUNIT_EXPECT_PTR_EQ(test, c.pprev, &(b.next)); + KUNIT_EXPECT_PTR_EQ(test, b.next, &c); + + KUNIT_EXPECT_PTR_EQ(test, a.pprev, &(c.next)); + KUNIT_EXPECT_PTR_EQ(test, c.next, &a); + + KUNIT_EXPECT_PTR_EQ(test, d.pprev, &(a.next)); + KUNIT_EXPECT_PTR_EQ(test, a.next, &d); +} + +/* Tests both hlist_fake() and hlist_add_fake() */ +static void hlist_test_fake(struct kunit *test) +{ + struct hlist_node a; + + INIT_HLIST_NODE(&a); + + /* not fake after init */ + KUNIT_EXPECT_FALSE(test, hlist_fake(&a)); + + hlist_add_fake(&a); + + /* is now fake */ + KUNIT_EXPECT_TRUE(test, hlist_fake(&a)); +} + +static void hlist_test_is_singular_node(struct kunit *test) +{ + struct hlist_node a, b; + HLIST_HEAD(list); + + INIT_HLIST_NODE(&a); + 
KUNIT_EXPECT_FALSE(test, hlist_is_singular_node(&a, &list)); + + hlist_add_head(&a, &list); + KUNIT_EXPECT_TRUE(test, hlist_is_singular_node(&a, &list)); + + hlist_add_head(&b, &list); + KUNIT_EXPECT_FALSE(test, hlist_is_singular_node(&a, &list)); + KUNIT_EXPECT_FALSE(test, hlist_is_singular_node(&b, &list)); +} + +static void hlist_test_empty(struct kunit *test) +{ + struct hlist_node a; + HLIST_HEAD(list); + + /* list starts off empty */ + KUNIT_EXPECT_TRUE(test, hlist_empty(&list)); + + hlist_add_head(&a, &list); + + /* list is no longer empty */ + KUNIT_EXPECT_FALSE(test, hlist_empty(&list)); +} + +static void hlist_test_move_list(struct kunit *test) +{ + struct hlist_node a; + HLIST_HEAD(list1); + HLIST_HEAD(list2); + + hlist_add_head(&a, &list1); + + KUNIT_EXPECT_FALSE(test, hlist_empty(&list1)); + KUNIT_EXPECT_TRUE(test, hlist_empty(&list2)); + hlist_move_list(&list1, &list2); + KUNIT_EXPECT_TRUE(test, hlist_empty(&list1)); + KUNIT_EXPECT_FALSE(test, hlist_empty(&list2)); + +} + +static void hlist_test_entry(struct kunit *test) +{ + struct hlist_test_struct test_struct; + + KUNIT_EXPECT_PTR_EQ(test, &test_struct, + hlist_entry(&(test_struct.list), + struct hlist_test_struct, list)); +} + +static void hlist_test_entry_safe(struct kunit *test) +{ + struct hlist_test_struct test_struct; + + KUNIT_EXPECT_PTR_EQ(test, &test_struct, + hlist_entry_safe(&(test_struct.list), + struct hlist_test_struct, list)); + + KUNIT_EXPECT_PTR_EQ(test, NULL, + hlist_entry_safe((struct hlist_node *)NULL, + struct hlist_test_struct, list)); +} + +static void hlist_test_for_each(struct kunit *test) +{ + struct hlist_node entries[3], *cur; + HLIST_HEAD(list); + int i = 0; + + hlist_add_head(&entries[0], &list); + hlist_add_behind(&entries[1], &entries[0]); + hlist_add_behind(&entries[2], &entries[1]); + + hlist_for_each(cur, &list) { + KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]); + i++; + } + + KUNIT_EXPECT_EQ(test, i, 3); +} + + +static void hlist_test_for_each_safe(struct kunit *test) +{ + struct hlist_node entries[3], *cur, *n; + HLIST_HEAD(list); + int i = 0; + + hlist_add_head(&entries[0], &list); + hlist_add_behind(&entries[1], &entries[0]); + hlist_add_behind(&entries[2], &entries[1]); + + hlist_for_each_safe(cur, n, &list) { + KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]); + hlist_del(&entries[i]); + i++; + } + + KUNIT_EXPECT_EQ(test, i, 3); + KUNIT_EXPECT_TRUE(test, hlist_empty(&list)); +} + +static void hlist_test_for_each_entry(struct kunit *test) +{ + struct hlist_test_struct entries[5], *cur; + HLIST_HEAD(list); + int i = 0; + + entries[0].data = 0; + hlist_add_head(&entries[0].list, &list); + for (i = 1; i < 5; ++i) { + entries[i].data = i; + hlist_add_behind(&entries[i].list, &entries[i-1].list); + } + + i = 0; + + hlist_for_each_entry(cur, &list, list) { + KUNIT_EXPECT_EQ(test, cur->data, i); + i++; + } + + KUNIT_EXPECT_EQ(test, i, 5); +} + +static void hlist_test_for_each_entry_continue(struct kunit *test) +{ + struct hlist_test_struct entries[5], *cur; + HLIST_HEAD(list); + int i = 0; + + entries[0].data = 0; + hlist_add_head(&entries[0].list, &list); + for (i = 1; i < 5; ++i) { + entries[i].data = i; + hlist_add_behind(&entries[i].list, &entries[i-1].list); + } + + /* We skip the first (zero-th) entry. */ + i = 1; + + cur = &entries[0]; + hlist_for_each_entry_continue(cur, list) { + KUNIT_EXPECT_EQ(test, cur->data, i); + /* Stamp over the entry. */ + cur->data = 42; + i++; + } + + KUNIT_EXPECT_EQ(test, i, 5); + /* The first entry was not visited. 
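The continue/from tests around this point pin down a subtle difference: hlist_for_each_entry_continue() resumes at the node after the cursor, while hlist_for_each_entry_from() (below) starts at the cursor itself. A condensed sketch using the same entries[] layout as the tests:

	struct hlist_test_struct *cur;

	cur = &entries[0];
	hlist_for_each_entry_continue(cur, list)
		cur->data = 42;		/* entries[0].data is left untouched */

	cur = &entries[0];
	hlist_for_each_entry_from(cur, list)
		cur->data = 42;		/* entries[0].data becomes 42 as well */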
*/ + KUNIT_EXPECT_EQ(test, entries[0].data, 0); + /* The second (and presumably others), were. */ + KUNIT_EXPECT_EQ(test, entries[1].data, 42); +} + +static void hlist_test_for_each_entry_from(struct kunit *test) +{ + struct hlist_test_struct entries[5], *cur; + HLIST_HEAD(list); + int i = 0; + + entries[0].data = 0; + hlist_add_head(&entries[0].list, &list); + for (i = 1; i < 5; ++i) { + entries[i].data = i; + hlist_add_behind(&entries[i].list, &entries[i-1].list); + } + + i = 0; + + cur = &entries[0]; + hlist_for_each_entry_from(cur, list) { + KUNIT_EXPECT_EQ(test, cur->data, i); + /* Stamp over the entry. */ + cur->data = 42; + i++; + } + + KUNIT_EXPECT_EQ(test, i, 5); + /* The first entry was visited. */ + KUNIT_EXPECT_EQ(test, entries[0].data, 42); +} + +static void hlist_test_for_each_entry_safe(struct kunit *test) +{ + struct hlist_test_struct entries[5], *cur; + struct hlist_node *tmp_node; + HLIST_HEAD(list); + int i = 0; + + entries[0].data = 0; + hlist_add_head(&entries[0].list, &list); + for (i = 1; i < 5; ++i) { + entries[i].data = i; + hlist_add_behind(&entries[i].list, &entries[i-1].list); + } + + i = 0; + + hlist_for_each_entry_safe(cur, tmp_node, &list, list) { + KUNIT_EXPECT_EQ(test, cur->data, i); + hlist_del(&cur->list); + i++; + } + + KUNIT_EXPECT_EQ(test, i, 5); + KUNIT_EXPECT_TRUE(test, hlist_empty(&list)); +} + + +static struct kunit_case hlist_test_cases[] = { + KUNIT_CASE(hlist_test_init), + KUNIT_CASE(hlist_test_unhashed), + KUNIT_CASE(hlist_test_unhashed_lockless), + KUNIT_CASE(hlist_test_del), + KUNIT_CASE(hlist_test_del_init), + KUNIT_CASE(hlist_test_add), + KUNIT_CASE(hlist_test_fake), + KUNIT_CASE(hlist_test_is_singular_node), + KUNIT_CASE(hlist_test_empty), + KUNIT_CASE(hlist_test_move_list), + KUNIT_CASE(hlist_test_entry), + KUNIT_CASE(hlist_test_entry_safe), + KUNIT_CASE(hlist_test_for_each), + KUNIT_CASE(hlist_test_for_each_safe), + KUNIT_CASE(hlist_test_for_each_entry), + KUNIT_CASE(hlist_test_for_each_entry_continue), + KUNIT_CASE(hlist_test_for_each_entry_from), + KUNIT_CASE(hlist_test_for_each_entry_safe), + {}, +}; + +static struct kunit_suite hlist_test_module = { + .name = "hlist", + .test_cases = hlist_test_cases, +}; + +kunit_test_suites(&list_test_module, &hlist_test_module); MODULE_LICENSE("GPL v2"); diff --git a/lib/lockref.c b/lib/lockref.c index 5b34bbd3eba8..45e93ece8ba0 100644 --- a/lib/lockref.c +++ b/lib/lockref.c @@ -14,12 +14,11 @@ BUILD_BUG_ON(sizeof(old) != 8); \ old.lock_count = READ_ONCE(lockref->lock_count); \ while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \ - struct lockref new = old, prev = old; \ + struct lockref new = old; \ CODE \ - old.lock_count = cmpxchg64_relaxed(&lockref->lock_count, \ - old.lock_count, \ - new.lock_count); \ - if (likely(old.lock_count == prev.lock_count)) { \ + if (likely(try_cmpxchg64_relaxed(&lockref->lock_count, \ + &old.lock_count, \ + new.lock_count))) { \ SUCCESS; \ } \ if (!--retry) \ @@ -112,31 +111,6 @@ int lockref_put_not_zero(struct lockref *lockref) EXPORT_SYMBOL(lockref_put_not_zero); /** - * lockref_get_or_lock - Increments count unless the count is 0 or dead - * @lockref: pointer to lockref structure - * Return: 1 if count updated successfully or 0 if count was zero - * and we got the lock instead. 
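On the lockref change above: try_cmpxchg64_relaxed() writes the observed value back into 'old' when the exchange fails, which is what lets the macro drop the separate 'prev' snapshot and the explicit equality check. A generic sketch of the pattern (the counter and the increment are illustrative):

	u64 old = READ_ONCE(*counter);
	u64 new;

	do {
		new = old + 1;
		/* on failure, 'old' is refreshed with the current *counter */
	} while (!try_cmpxchg64_relaxed(counter, &old, new));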
- */ -int lockref_get_or_lock(struct lockref *lockref) -{ - CMPXCHG_LOOP( - new.count++; - if (old.count <= 0) - break; - , - return 1; - ); - - spin_lock(&lockref->lock); - if (lockref->count <= 0) - return 0; - lockref->count++; - spin_unlock(&lockref->lock); - return 1; -} -EXPORT_SYMBOL(lockref_get_or_lock); - -/** * lockref_put_return - Decrement reference count if possible * @lockref: pointer to lockref structure * diff --git a/lib/memneq.c b/lib/memneq.c new file mode 100644 index 000000000000..fb11608b1ec1 --- /dev/null +++ b/lib/memneq.c @@ -0,0 +1,176 @@ +/* + * Constant-time equality testing of memory regions. + * + * Authors: + * + * James Yonan <james@openvpn.net> + * Daniel Borkmann <dborkman@redhat.com> + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * BSD LICENSE + * + * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of OpenVPN Technologies nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <crypto/algapi.h> +#include <asm/unaligned.h> + +#ifndef __HAVE_ARCH_CRYPTO_MEMNEQ + +/* Generic path for arbitrary size */ +static inline unsigned long +__crypto_memneq_generic(const void *a, const void *b, size_t size) +{ + unsigned long neq = 0; + +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + while (size >= sizeof(unsigned long)) { + neq |= get_unaligned((unsigned long *)a) ^ + get_unaligned((unsigned long *)b); + OPTIMIZER_HIDE_VAR(neq); + a += sizeof(unsigned long); + b += sizeof(unsigned long); + size -= sizeof(unsigned long); + } +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + while (size > 0) { + neq |= *(unsigned char *)a ^ *(unsigned char *)b; + OPTIMIZER_HIDE_VAR(neq); + a += 1; + b += 1; + size -= 1; + } + return neq; +} + +/* Loop-free fast-path for frequently used 16-byte size */ +static inline unsigned long __crypto_memneq_16(const void *a, const void *b) +{ + unsigned long neq = 0; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (sizeof(unsigned long) == 8) { + neq |= get_unaligned((unsigned long *)a) ^ + get_unaligned((unsigned long *)b); + OPTIMIZER_HIDE_VAR(neq); + neq |= get_unaligned((unsigned long *)(a + 8)) ^ + get_unaligned((unsigned long *)(b + 8)); + OPTIMIZER_HIDE_VAR(neq); + } else if (sizeof(unsigned int) == 4) { + neq |= get_unaligned((unsigned int *)a) ^ + get_unaligned((unsigned int *)b); + OPTIMIZER_HIDE_VAR(neq); + neq |= get_unaligned((unsigned int *)(a + 4)) ^ + get_unaligned((unsigned int *)(b + 4)); + OPTIMIZER_HIDE_VAR(neq); + neq |= get_unaligned((unsigned int *)(a + 8)) ^ + get_unaligned((unsigned int *)(b + 8)); + OPTIMIZER_HIDE_VAR(neq); + neq |= get_unaligned((unsigned int *)(a + 12)) ^ + get_unaligned((unsigned int *)(b + 12)); + OPTIMIZER_HIDE_VAR(neq); + } else +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + { + neq |= *(unsigned char *)(a) ^ *(unsigned char *)(b); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+1) ^ *(unsigned char *)(b+1); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+2) ^ *(unsigned char *)(b+2); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+3) ^ *(unsigned char *)(b+3); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+4) ^ *(unsigned char *)(b+4); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+5) ^ *(unsigned char *)(b+5); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+6) ^ *(unsigned char *)(b+6); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+7) ^ *(unsigned char *)(b+7); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+8) ^ *(unsigned char *)(b+8); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+9) ^ *(unsigned char *)(b+9); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+10) ^ *(unsigned char *)(b+10); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+11) ^ *(unsigned char *)(b+11); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+12) ^ *(unsigned char *)(b+12); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+13) ^ *(unsigned char *)(b+13); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+14) ^ *(unsigned char *)(b+14); + OPTIMIZER_HIDE_VAR(neq); + neq |= *(unsigned char *)(a+15) ^ *(unsigned char *)(b+15); + OPTIMIZER_HIDE_VAR(neq); + } + + return neq; +} + +/* Compare two areas of memory without leaking timing information, + * and with special optimizations for common sizes. Users should + * not call this function directly, but should instead use + * crypto_memneq defined in crypto/algapi.h. 
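As the comment above says, callers use the crypto_memneq() wrapper from crypto/algapi.h rather than __crypto_memneq() directly. A hedged usage sketch (buffer names illustrative); the point of the constant-time walk is that the comparison cost does not reveal how many leading bytes matched:

	#include <crypto/algapi.h>

	/* Authentication-tag check: a non-zero return means the buffers differ. */
	if (crypto_memneq(received_mac, computed_mac, sizeof(computed_mac)))
		return -EBADMSG;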
+ */ +noinline unsigned long __crypto_memneq(const void *a, const void *b, + size_t size) +{ + switch (size) { + case 16: + return __crypto_memneq_16(a, b); + default: + return __crypto_memneq_generic(a, b, size); + } +} +EXPORT_SYMBOL(__crypto_memneq); + +#endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */ diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c index 199ab201d501..d01aec6ae15c 100644 --- a/lib/nmi_backtrace.c +++ b/lib/nmi_backtrace.c @@ -99,7 +99,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs) * Allow nested NMI backtraces while serializing * against other CPUs. */ - printk_cpu_lock_irqsave(flags); + printk_cpu_sync_get_irqsave(flags); if (!READ_ONCE(backtrace_idle) && regs && cpu_in_idle(instruction_pointer(regs))) { pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n", cpu, (void *)instruction_pointer(regs)); @@ -110,7 +110,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs) else dump_stack(); } - printk_cpu_unlock_irqrestore(flags); + printk_cpu_sync_put_irqrestore(flags); cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); return true; } diff --git a/lib/nodemask.c b/lib/nodemask.c index 3aa454c54c0d..e22647f5181b 100644 --- a/lib/nodemask.c +++ b/lib/nodemask.c @@ -3,9 +3,9 @@ #include <linux/module.h> #include <linux/random.h> -int __next_node_in(int node, const nodemask_t *srcp) +unsigned int __next_node_in(int node, const nodemask_t *srcp) { - int ret = __next_node(node, srcp); + unsigned int ret = __next_node(node, srcp); if (ret == MAX_NUMNODES) ret = __first_node(srcp); diff --git a/lib/polynomial.c b/lib/polynomial.c new file mode 100644 index 000000000000..66d383445fec --- /dev/null +++ b/lib/polynomial.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generic polynomial calculation using integer coefficients. + * + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Authors: + * Maxim Kaurkin <maxim.kaurkin@baikalelectronics.ru> + * Serge Semin <Sergey.Semin@baikalelectronics.ru> + * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/polynomial.h> + +/* + * Originally this was part of drivers/hwmon/bt1-pvt.c. + * There the following conversion is used and should serve as an example here: + * + * The original translation formulae of the temperature (in degrees of Celsius) + * to PVT data and vice-versa are following: + * + * N = 1.8322e-8*(T^4) + 2.343e-5*(T^3) + 8.7018e-3*(T^2) + 3.9269*(T^1) + + * 1.7204e2 + * T = -1.6743e-11*(N^4) + 8.1542e-8*(N^3) + -1.8201e-4*(N^2) + + * 3.1020e-1*(N^1) - 4.838e1 + * + * where T = [-48.380, 147.438]C and N = [0, 1023]. + * + * They must be accordingly altered to be suitable for the integer arithmetics. + * The technique is called 'factor redistribution', which just makes sure the + * multiplications and divisions are made so to have a result of the operations + * within the integer numbers limit. In addition we need to translate the + * formulae to accept millidegrees of Celsius. Here what they look like after + * the alterations: + * + * N = (18322e-20*(T^4) + 2343e-13*(T^3) + 87018e-9*(T^2) + 39269e-3*T + + * 17204e2) / 1e4 + * T = -16743e-12*(D^4) + 81542e-9*(D^3) - 182010e-6*(D^2) + 310200e-3*D - + * 48380 + * where T = [-48380, 147438] mC and N = [0, 1023]. 
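Before the driver-sized tables quoted below, a toy example may help with the term layout: each entry is {deg, coef, divider, divider_leftover}, and polynomial_calc() accumulates coef * data^deg / (divider^deg * divider_leftover) over the terms, finally dividing by total_divider. A hedged sketch evaluating y = 2*x + 3:

	static const struct polynomial poly_linear = {
		.total_divider = 1,
		.terms = {
			{ .deg = 1, .coef = 2, .divider = 1, .divider_leftover = 1 },
			{ .deg = 0, .coef = 3, .divider = 1, .divider_leftover = 1 },	/* free term ends the walk */
		},
	};

	long y = polynomial_calc(&poly_linear, 10);	/* 2*10 + 3 = 23 */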
+ * + * static const struct polynomial poly_temp_to_N = { + * .total_divider = 10000, + * .terms = { + * {4, 18322, 10000, 10000}, + * {3, 2343, 10000, 10}, + * {2, 87018, 10000, 10}, + * {1, 39269, 1000, 1}, + * {0, 1720400, 1, 1} + * } + * }; + * + * static const struct polynomial poly_N_to_temp = { + * .total_divider = 1, + * .terms = { + * {4, -16743, 1000, 1}, + * {3, 81542, 1000, 1}, + * {2, -182010, 1000, 1}, + * {1, 310200, 1000, 1}, + * {0, -48380, 1, 1} + * } + * }; + */ + +/** + * polynomial_calc - calculate a polynomial using integer arithmetic + * + * @poly: pointer to the descriptor of the polynomial + * @data: input value of the polynimal + * + * Calculate the result of a polynomial using only integer arithmetic. For + * this to work without too much loss of precision the coefficients has to + * be altered. This is called factor redistribution. + * + * Returns the result of the polynomial calculation. + */ +long polynomial_calc(const struct polynomial *poly, long data) +{ + const struct polynomial_term *term = poly->terms; + long total_divider = poly->total_divider ?: 1; + long tmp, ret = 0; + int deg; + + /* + * Here is the polynomial calculation function, which performs the + * redistributed terms calculations. It's pretty straightforward. + * We walk over each degree term up to the free one, and perform + * the redistributed multiplication of the term coefficient, its + * divider (as for the rationale fraction representation), data + * power and the rational fraction divider leftover. Then all of + * this is collected in a total sum variable, which value is + * normalized by the total divider before being returned. + */ + do { + tmp = term->coef; + for (deg = 0; deg < term->deg; ++deg) + tmp = mult_frac(tmp, data, term->divider); + ret += tmp / term->divider_leftover; + } while ((term++)->deg); + + return ret / total_divider; +} +EXPORT_SYMBOL_GPL(polynomial_calc); + +MODULE_DESCRIPTION("Generic polynomial calculations"); +MODULE_LICENSE("GPL"); diff --git a/lib/random32.c b/lib/random32.c index 976632003ec6..d5d9029362cb 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -245,25 +245,13 @@ static struct prandom_test2 { { 407983964U, 921U, 728767059U }, }; -static u32 __extract_hwseed(void) -{ - unsigned int val = 0; - - (void)(arch_get_random_seed_int(&val) || - arch_get_random_int(&val)); - - return val; -} - -static void prandom_seed_early(struct rnd_state *state, u32 seed, - bool mix_with_hwseed) +static void prandom_state_selftest_seed(struct rnd_state *state, u32 seed) { #define LCG(x) ((x) * 69069U) /* super-duper LCG */ -#define HWSEED() (mix_with_hwseed ? 
__extract_hwseed() : 0) - state->s1 = __seed(HWSEED() ^ LCG(seed), 2U); - state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U); - state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U); - state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U); + state->s1 = __seed(LCG(seed), 2U); + state->s2 = __seed(LCG(state->s1), 8U); + state->s3 = __seed(LCG(state->s2), 16U); + state->s4 = __seed(LCG(state->s3), 128U); } static int __init prandom_state_selftest(void) @@ -274,7 +262,7 @@ static int __init prandom_state_selftest(void) for (i = 0; i < ARRAY_SIZE(test1); i++) { struct rnd_state state; - prandom_seed_early(&state, test1[i].seed, false); + prandom_state_selftest_seed(&state, test1[i].seed); prandom_warmup(&state); if (test1[i].result != prandom_u32_state(&state)) @@ -289,7 +277,7 @@ static int __init prandom_state_selftest(void) for (i = 0; i < ARRAY_SIZE(test2); i++) { struct rnd_state state; - prandom_seed_early(&state, test2[i].seed, false); + prandom_state_selftest_seed(&state, test2[i].seed); prandom_warmup(&state); for (j = 0; j < test2[i].iteration - 1; j++) @@ -310,324 +298,3 @@ static int __init prandom_state_selftest(void) } core_initcall(prandom_state_selftest); #endif - -/* - * The prandom_u32() implementation is now completely separate from the - * prandom_state() functions, which are retained (for now) for compatibility. - * - * Because of (ab)use in the networking code for choosing random TCP/UDP port - * numbers, which open DoS possibilities if guessable, we want something - * stronger than a standard PRNG. But the performance requirements of - * the network code do not allow robust crypto for this application. - * - * So this is a homebrew Junior Spaceman implementation, based on the - * lowest-latency trustworthy crypto primitive available, SipHash. - * (The authors of SipHash have not been consulted about this abuse of - * their work.) - * - * Standard SipHash-2-4 uses 2n+4 rounds to hash n words of input to - * one word of output. This abbreviated version uses 2 rounds per word - * of output. - */ - -struct siprand_state { - unsigned long v0; - unsigned long v1; - unsigned long v2; - unsigned long v3; -}; - -static DEFINE_PER_CPU(struct siprand_state, net_rand_state) __latent_entropy; -DEFINE_PER_CPU(unsigned long, net_rand_noise); -EXPORT_PER_CPU_SYMBOL(net_rand_noise); - -/* - * This is the core CPRNG function. As "pseudorandom", this is not used - * for truly valuable things, just intended to be a PITA to guess. - * For maximum speed, we do just two SipHash rounds per word. This is - * the same rate as 4 rounds per 64 bits that SipHash normally uses, - * so hopefully it's reasonably secure. - * - * There are two changes from the official SipHash finalization: - * - We omit some constants XORed with v2 in the SipHash spec as irrelevant; - * they are there only to make the output rounds distinct from the input - * rounds, and this application has no input rounds. - * - Rather than returning v0^v1^v2^v3, return v1+v3. - * If you look at the SipHash round, the last operation on v3 is - * "v3 ^= v0", so "v0 ^ v3" just undoes that, a waste of time. - * Likewise "v1 ^= v2". (The rotate of v2 makes a difference, but - * it still cancels out half of the bits in v2 for no benefit.) - * Second, since the last combining operation was xor, continue the - * pattern of alternating xor/add for a tiny bit of extra non-linearity. 
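The selftest seeding above is now purely deterministic — no hardware-seed mixing — which is all the retained prandom state API promises. A minimal sketch of that interface (the seed value is illustrative):

	struct rnd_state st;
	u32 a, b;

	prandom_seed_state(&st, 42);	/* identical seed -> identical sequence */
	a = prandom_u32_state(&st);
	b = prandom_u32_state(&st);	/* next value in the same stream */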
- */ -static inline u32 siprand_u32(struct siprand_state *s) -{ - unsigned long v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3; - unsigned long n = raw_cpu_read(net_rand_noise); - - v3 ^= n; - PRND_SIPROUND(v0, v1, v2, v3); - PRND_SIPROUND(v0, v1, v2, v3); - v0 ^= n; - s->v0 = v0; s->v1 = v1; s->v2 = v2; s->v3 = v3; - return v1 + v3; -} - - -/** - * prandom_u32 - pseudo random number generator - * - * A 32 bit pseudo-random number is generated using a fast - * algorithm suitable for simulation. This algorithm is NOT - * considered safe for cryptographic use. - */ -u32 prandom_u32(void) -{ - struct siprand_state *state = get_cpu_ptr(&net_rand_state); - u32 res = siprand_u32(state); - - put_cpu_ptr(&net_rand_state); - return res; -} -EXPORT_SYMBOL(prandom_u32); - -/** - * prandom_bytes - get the requested number of pseudo-random bytes - * @buf: where to copy the pseudo-random bytes to - * @bytes: the requested number of bytes - */ -void prandom_bytes(void *buf, size_t bytes) -{ - struct siprand_state *state = get_cpu_ptr(&net_rand_state); - u8 *ptr = buf; - - while (bytes >= sizeof(u32)) { - put_unaligned(siprand_u32(state), (u32 *)ptr); - ptr += sizeof(u32); - bytes -= sizeof(u32); - } - - if (bytes > 0) { - u32 rem = siprand_u32(state); - - do { - *ptr++ = (u8)rem; - rem >>= BITS_PER_BYTE; - } while (--bytes > 0); - } - put_cpu_ptr(&net_rand_state); -} -EXPORT_SYMBOL(prandom_bytes); - -/** - * prandom_seed - add entropy to pseudo random number generator - * @entropy: entropy value - * - * Add some additional seed material to the prandom pool. - * The "entropy" is actually our IP address (the only caller is - * the network code), not for unpredictability, but to ensure that - * different machines are initialized differently. - */ -void prandom_seed(u32 entropy) -{ - int i; - - add_device_randomness(&entropy, sizeof(entropy)); - - for_each_possible_cpu(i) { - struct siprand_state *state = per_cpu_ptr(&net_rand_state, i); - unsigned long v0 = state->v0, v1 = state->v1; - unsigned long v2 = state->v2, v3 = state->v3; - - do { - v3 ^= entropy; - PRND_SIPROUND(v0, v1, v2, v3); - PRND_SIPROUND(v0, v1, v2, v3); - v0 ^= entropy; - } while (unlikely(!v0 || !v1 || !v2 || !v3)); - - WRITE_ONCE(state->v0, v0); - WRITE_ONCE(state->v1, v1); - WRITE_ONCE(state->v2, v2); - WRITE_ONCE(state->v3, v3); - } -} -EXPORT_SYMBOL(prandom_seed); - -/* - * Generate some initially weak seeding values to allow - * the prandom_u32() engine to be started. - */ -static int __init prandom_init_early(void) -{ - int i; - unsigned long v0, v1, v2, v3; - - if (!arch_get_random_long(&v0)) - v0 = jiffies; - if (!arch_get_random_long(&v1)) - v1 = random_get_entropy(); - v2 = v0 ^ PRND_K0; - v3 = v1 ^ PRND_K1; - - for_each_possible_cpu(i) { - struct siprand_state *state; - - v3 ^= i; - PRND_SIPROUND(v0, v1, v2, v3); - PRND_SIPROUND(v0, v1, v2, v3); - v0 ^= i; - - state = per_cpu_ptr(&net_rand_state, i); - state->v0 = v0; state->v1 = v1; - state->v2 = v2; state->v3 = v3; - } - - return 0; -} -core_initcall(prandom_init_early); - - -/* Stronger reseeding when available, and periodically thereafter. */ -static void prandom_reseed(struct timer_list *unused); - -static DEFINE_TIMER(seed_timer, prandom_reseed); - -static void prandom_reseed(struct timer_list *unused) -{ - unsigned long expires; - int i; - - /* - * Reinitialize each CPU's PRNG with 128 bits of key. - * No locking on the CPUs, but then somewhat random results are, - * well, expected. 
- */ - for_each_possible_cpu(i) { - struct siprand_state *state; - unsigned long v0 = get_random_long(), v2 = v0 ^ PRND_K0; - unsigned long v1 = get_random_long(), v3 = v1 ^ PRND_K1; -#if BITS_PER_LONG == 32 - int j; - - /* - * On 32-bit machines, hash in two extra words to - * approximate 128-bit key length. Not that the hash - * has that much security, but this prevents a trivial - * 64-bit brute force. - */ - for (j = 0; j < 2; j++) { - unsigned long m = get_random_long(); - - v3 ^= m; - PRND_SIPROUND(v0, v1, v2, v3); - PRND_SIPROUND(v0, v1, v2, v3); - v0 ^= m; - } -#endif - /* - * Probably impossible in practice, but there is a - * theoretical risk that a race between this reseeding - * and the target CPU writing its state back could - * create the all-zero SipHash fixed point. - * - * To ensure that never happens, ensure the state - * we write contains no zero words. - */ - state = per_cpu_ptr(&net_rand_state, i); - WRITE_ONCE(state->v0, v0 ? v0 : -1ul); - WRITE_ONCE(state->v1, v1 ? v1 : -1ul); - WRITE_ONCE(state->v2, v2 ? v2 : -1ul); - WRITE_ONCE(state->v3, v3 ? v3 : -1ul); - } - - /* reseed every ~60 seconds, in [40 .. 80) interval with slack */ - expires = round_jiffies(jiffies + 40 * HZ + prandom_u32_max(40 * HZ)); - mod_timer(&seed_timer, expires); -} - -/* - * The random ready callback can be called from almost any interrupt. - * To avoid worrying about whether it's safe to delay that interrupt - * long enough to seed all CPUs, just schedule an immediate timer event. - */ -static int prandom_timer_start(struct notifier_block *nb, - unsigned long action, void *data) -{ - mod_timer(&seed_timer, jiffies); - return 0; -} - -#ifdef CONFIG_RANDOM32_SELFTEST -/* Principle: True 32-bit random numbers will all have 16 differing bits on - * average. For each 32-bit number, there are 601M numbers differing by 16 - * bits, and 89% of the numbers differ by at least 12 bits. Note that more - * than 16 differing bits also implies a correlation with inverted bits. Thus - * we take 1024 random numbers and compare each of them to the other ones, - * counting the deviation of correlated bits to 16. Constants report 32, - * counters 32-log2(TEST_SIZE), and pure randoms, around 6 or lower. With the - * u32 total, TEST_SIZE may be as large as 4096 samples. - */ -#define TEST_SIZE 1024 -static int __init prandom32_state_selftest(void) -{ - unsigned int x, y, bits, samples; - u32 xor, flip; - u32 total; - u32 *data; - - data = kmalloc(sizeof(*data) * TEST_SIZE, GFP_KERNEL); - if (!data) - return 0; - - for (samples = 0; samples < TEST_SIZE; samples++) - data[samples] = prandom_u32(); - - flip = total = 0; - for (x = 0; x < samples; x++) { - for (y = 0; y < samples; y++) { - if (x == y) - continue; - xor = data[x] ^ data[y]; - flip |= xor; - bits = hweight32(xor); - total += (bits - 16) * (bits - 16); - } - } - - /* We'll return the average deviation as 2*sqrt(corr/samples), which - * is also sqrt(4*corr/samples) which provides a better resolution. - */ - bits = int_sqrt(total / (samples * (samples - 1)) * 4); - if (bits > 6) - pr_warn("prandom32: self test failed (at least %u bits" - " correlated, fixed_mask=%#x fixed_value=%#x\n", - bits, ~flip, data[0] & ~flip); - else - pr_info("prandom32: self test passed (less than %u bits" - " correlated)\n", - bits+1); - kfree(data); - return 0; -} -core_initcall(prandom32_state_selftest); -#endif /* CONFIG_RANDOM32_SELFTEST */ - -/* - * Start periodic full reseeding as soon as strong - * random numbers are available. 
- */ -static int __init prandom_init_late(void) -{ - static struct notifier_block random_ready = { - .notifier_call = prandom_timer_start - }; - int ret = register_random_ready_notifier(&random_ready); - - if (ret == -EALREADY) { - prandom_timer_start(&random_ready, 0, NULL); - ret = 0; - } - return ret; -} -late_initcall(prandom_init_late); diff --git a/lib/sbitmap.c b/lib/sbitmap.c index ae4fd4de9ebe..29eb0484215a 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -528,7 +528,7 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, sbitmap_deferred_clear(map); if (map->word == (1UL << (map_depth - 1)) - 1) - continue; + goto next; nr = find_first_zero_bit(&map->word, map_depth); if (nr + nr_tags <= map_depth) { @@ -539,6 +539,8 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, get_mask = ((1UL << map_tags) - 1) << nr; do { val = READ_ONCE(map->word); + if ((val & ~get_mask) != val) + goto next; ret = atomic_long_cmpxchg(ptr, val, get_mask | val); } while (ret != val); get_mask = (get_mask & ~ret) >> nr; @@ -549,6 +551,7 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, return get_mask; } } +next: /* Jump to next index. */ if (++index >= sb->map_nr) index = 0; diff --git a/lib/siphash.c b/lib/siphash.c index 72b9068ab57b..15bc5b6f368c 100644 --- a/lib/siphash.c +++ b/lib/siphash.c @@ -1,6 +1,5 @@ -/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. - * - * This file is provided under a dual BSD/GPLv2 license. +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* Copyright (C) 2016-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. * * SipHash: a fast short-input PRF * https://131002.net/siphash/ @@ -18,19 +17,13 @@ #include <asm/word-at-a-time.h> #endif -#define SIPROUND \ - do { \ - v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \ - v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \ - v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \ - v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \ - } while (0) +#define SIPROUND SIPHASH_PERMUTATION(v0, v1, v2, v3) #define PREAMBLE(len) \ - u64 v0 = 0x736f6d6570736575ULL; \ - u64 v1 = 0x646f72616e646f6dULL; \ - u64 v2 = 0x6c7967656e657261ULL; \ - u64 v3 = 0x7465646279746573ULL; \ + u64 v0 = SIPHASH_CONST_0; \ + u64 v1 = SIPHASH_CONST_1; \ + u64 v2 = SIPHASH_CONST_2; \ + u64 v3 = SIPHASH_CONST_3; \ u64 b = ((u64)(len)) << 56; \ v3 ^= key->key[1]; \ v2 ^= key->key[0]; \ @@ -389,19 +382,13 @@ u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third, } EXPORT_SYMBOL(hsiphash_4u32); #else -#define HSIPROUND \ - do { \ - v0 += v1; v1 = rol32(v1, 5); v1 ^= v0; v0 = rol32(v0, 16); \ - v2 += v3; v3 = rol32(v3, 8); v3 ^= v2; \ - v0 += v3; v3 = rol32(v3, 7); v3 ^= v0; \ - v2 += v1; v1 = rol32(v1, 13); v1 ^= v2; v2 = rol32(v2, 16); \ - } while (0) +#define HSIPROUND HSIPHASH_PERMUTATION(v0, v1, v2, v3) #define HPREAMBLE(len) \ - u32 v0 = 0; \ - u32 v1 = 0; \ - u32 v2 = 0x6c796765U; \ - u32 v3 = 0x74656462U; \ + u32 v0 = HSIPHASH_CONST_0; \ + u32 v1 = HSIPHASH_CONST_1; \ + u32 v2 = HSIPHASH_CONST_2; \ + u32 v3 = HSIPHASH_CONST_3; \ u32 b = ((u32)(len)) << 24; \ v3 ^= key->key[1]; \ v2 ^= key->key[0]; \ diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c index 8662dc6cb509..7a0564d7cb7a 100644 --- a/lib/slub_kunit.c +++ b/lib/slub_kunit.c @@ -12,7 +12,7 @@ static int slab_errors; static void test_clobber_zone(struct kunit *test) { struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_alloc", 64, 0, - 
SLAB_RED_ZONE, NULL); + SLAB_RED_ZONE|SLAB_NO_USER_FLAGS, NULL); u8 *p = kmem_cache_alloc(s, GFP_KERNEL); kasan_disable_current(); @@ -30,7 +30,7 @@ static void test_clobber_zone(struct kunit *test) static void test_next_pointer(struct kunit *test) { struct kmem_cache *s = kmem_cache_create("TestSlub_next_ptr_free", 64, 0, - SLAB_POISON, NULL); + SLAB_POISON|SLAB_NO_USER_FLAGS, NULL); u8 *p = kmem_cache_alloc(s, GFP_KERNEL); unsigned long tmp; unsigned long *ptr_addr; @@ -75,7 +75,7 @@ static void test_next_pointer(struct kunit *test) static void test_first_word(struct kunit *test) { struct kmem_cache *s = kmem_cache_create("TestSlub_1th_word_free", 64, 0, - SLAB_POISON, NULL); + SLAB_POISON|SLAB_NO_USER_FLAGS, NULL); u8 *p = kmem_cache_alloc(s, GFP_KERNEL); kmem_cache_free(s, p); @@ -90,7 +90,7 @@ static void test_first_word(struct kunit *test) static void test_clobber_50th_byte(struct kunit *test) { struct kmem_cache *s = kmem_cache_create("TestSlub_50th_word_free", 64, 0, - SLAB_POISON, NULL); + SLAB_POISON|SLAB_NO_USER_FLAGS, NULL); u8 *p = kmem_cache_alloc(s, GFP_KERNEL); kmem_cache_free(s, p); @@ -106,7 +106,7 @@ static void test_clobber_50th_byte(struct kunit *test) static void test_clobber_redzone_free(struct kunit *test) { struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_free", 64, 0, - SLAB_RED_ZONE, NULL); + SLAB_RED_ZONE|SLAB_NO_USER_FLAGS, NULL); u8 *p = kmem_cache_alloc(s, GFP_KERNEL); kasan_disable_current(); diff --git a/lib/stackdepot.c b/lib/stackdepot.c index bf5ba9af0500..5ca0d086ef4a 100644 --- a/lib/stackdepot.c +++ b/lib/stackdepot.c @@ -66,6 +66,9 @@ struct stack_record { unsigned long entries[]; /* Variable-sized array of entries. */ }; +static bool __stack_depot_want_early_init __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT); +static bool __stack_depot_early_init_passed __initdata; + static void *stack_slabs[STACK_ALLOC_MAX_SLABS]; static int depot_index; @@ -162,38 +165,58 @@ static int __init is_stack_depot_disabled(char *str) } early_param("stack_depot_disable", is_stack_depot_disabled); -/* - * __ref because of memblock_alloc(), which will not be actually called after - * the __init code is gone, because at that point slab_is_available() is true - */ -__ref int stack_depot_init(void) +void __init stack_depot_want_early_init(void) +{ + /* Too late to request early init now */ + WARN_ON(__stack_depot_early_init_passed); + + __stack_depot_want_early_init = true; +} + +int __init stack_depot_early_init(void) +{ + size_t size; + + /* This is supposed to be called only once, from mm_init() */ + if (WARN_ON(__stack_depot_early_init_passed)) + return 0; + + __stack_depot_early_init_passed = true; + + if (!__stack_depot_want_early_init || stack_depot_disable) + return 0; + + size = (STACK_HASH_SIZE * sizeof(struct stack_record *)); + pr_info("Stack Depot early init allocating hash table with memblock_alloc, %zu bytes\n", + size); + stack_table = memblock_alloc(size, SMP_CACHE_BYTES); + + if (!stack_table) { + pr_err("Stack Depot hash table allocation failed, disabling\n"); + stack_depot_disable = true; + return -ENOMEM; + } + + return 0; +} + +int stack_depot_init(void) { static DEFINE_MUTEX(stack_depot_init_mutex); + int ret = 0; mutex_lock(&stack_depot_init_mutex); if (!stack_depot_disable && !stack_table) { - size_t size = (STACK_HASH_SIZE * sizeof(struct stack_record *)); - int i; - - if (slab_is_available()) { - pr_info("Stack Depot allocating hash table with kvmalloc\n"); - stack_table = kvmalloc(size, GFP_KERNEL); - } else { - pr_info("Stack Depot 
allocating hash table with memblock_alloc\n"); - stack_table = memblock_alloc(size, SMP_CACHE_BYTES); - } - if (stack_table) { - for (i = 0; i < STACK_HASH_SIZE; i++) - stack_table[i] = NULL; - } else { + pr_info("Stack Depot allocating hash table with kvcalloc\n"); + stack_table = kvcalloc(STACK_HASH_SIZE, sizeof(struct stack_record *), GFP_KERNEL); + if (!stack_table) { pr_err("Stack Depot hash table allocation failed, disabling\n"); stack_depot_disable = true; - mutex_unlock(&stack_depot_init_mutex); - return -ENOMEM; + ret = -ENOMEM; } } mutex_unlock(&stack_depot_init_mutex); - return 0; + return ret; } EXPORT_SYMBOL_GPL(stack_depot_init); diff --git a/lib/string.c b/lib/string.c index 485777c9da83..6f334420f687 100644 --- a/lib/string.c +++ b/lib/string.c @@ -517,21 +517,13 @@ EXPORT_SYMBOL(strnlen); size_t strspn(const char *s, const char *accept) { const char *p; - const char *a; - size_t count = 0; for (p = s; *p != '\0'; ++p) { - for (a = accept; *a != '\0'; ++a) { - if (*p == *a) - break; - } - if (*a == '\0') - return count; - ++count; + if (!strchr(accept, *p)) + break; } - return count; + return p - s; } - EXPORT_SYMBOL(strspn); #endif @@ -544,17 +536,12 @@ EXPORT_SYMBOL(strspn); size_t strcspn(const char *s, const char *reject) { const char *p; - const char *r; - size_t count = 0; for (p = s; *p != '\0'; ++p) { - for (r = reject; *r != '\0'; ++r) { - if (*p == *r) - return count; - } - ++count; + if (strchr(reject, *p)) + break; } - return count; + return p - s; } EXPORT_SYMBOL(strcspn); #endif diff --git a/lib/string_helpers.c b/lib/string_helpers.c index 4f877e9551d5..5ed3beb066e6 100644 --- a/lib/string_helpers.c +++ b/lib/string_helpers.c @@ -757,6 +757,9 @@ char **devm_kasprintf_strarray(struct device *dev, const char *prefix, size_t n) return ERR_PTR(-ENOMEM); } + ptr->n = n; + devres_add(dev, ptr); + return ptr->array; } EXPORT_SYMBOL_GPL(devm_kasprintf_strarray); diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c index 0c82f07f74fc..d5923a640457 100644 --- a/lib/test_bitmap.c +++ b/lib/test_bitmap.c @@ -585,6 +585,30 @@ static void __init test_bitmap_arr32(void) } } +static void __init test_bitmap_arr64(void) +{ + unsigned int nbits, next_bit; + u64 arr[EXP1_IN_BITS / 64]; + DECLARE_BITMAP(bmap2, EXP1_IN_BITS); + + memset(arr, 0xa5, sizeof(arr)); + + for (nbits = 0; nbits < EXP1_IN_BITS; ++nbits) { + memset(bmap2, 0xff, sizeof(arr)); + bitmap_to_arr64(arr, exp1, nbits); + bitmap_from_arr64(bmap2, arr, nbits); + expect_eq_bitmap(bmap2, exp1, nbits); + + next_bit = find_next_bit(bmap2, round_up(nbits, BITS_PER_LONG), nbits); + if (next_bit < round_up(nbits, BITS_PER_LONG)) + pr_err("bitmap_copy_arr64(nbits == %d:" + " tail is not safely cleared: %d\n", nbits, next_bit); + + if (nbits < EXP1_IN_BITS - 64) + expect_eq_uint(arr[DIV_ROUND_UP(nbits, 64)], 0xa5a5a5a5); + } +} + static void noinline __init test_mem_optimisations(void) { DECLARE_BITMAP(bmap1, 1024); @@ -852,6 +876,7 @@ static void __init selftest(void) test_copy(); test_replace(); test_bitmap_arr32(); + test_bitmap_arr64(); test_bitmap_parse(); test_bitmap_parselist(); test_bitmap_printlist(); diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 0c5cb2d6436a..2a7836e115b4 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -53,6 +53,7 @@ #define FLAG_EXPECTED_FAIL BIT(1) #define FLAG_SKB_FRAG BIT(2) #define FLAG_VERIFIER_ZEXT BIT(3) +#define FLAG_LARGE_MEM BIT(4) enum { CLASSIC = BIT(6), /* Old BPF instructions only. 
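On the strspn()/strcspn() rewrite above: both now delegate the per-character set test to strchr() and derive the count from pointer arithmetic, with unchanged semantics. A behavioural sketch (strings illustrative):

	size_t a = strspn("abc123", "abcdef");		/* 3: length of the leading "abc" run */
	size_t b = strcspn("abc123", "0123456789");	/* 3: stops at the first digit */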
*/ @@ -7838,7 +7839,7 @@ static struct bpf_test tests[] = { }, /* BPF_LDX_MEM B/H/W/DW */ { - "BPF_LDX_MEM | BPF_B", + "BPF_LDX_MEM | BPF_B, base", .u.insns_int = { BPF_LD_IMM64(R1, 0x0102030405060708ULL), BPF_LD_IMM64(R2, 0x0000000000000008ULL), @@ -7878,7 +7879,56 @@ static struct bpf_test tests[] = { .stack_depth = 8, }, { - "BPF_LDX_MEM | BPF_H", + "BPF_LDX_MEM | BPF_B, negative offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_LD_IMM64(R3, 0x0000000000000088ULL), + BPF_ALU64_IMM(BPF_ADD, R1, 512), + BPF_STX_MEM(BPF_B, R1, R2, -256), + BPF_LDX_MEM(BPF_B, R0, R1, -256), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 512, 0 } }, + .stack_depth = 0, + }, + { + "BPF_LDX_MEM | BPF_B, small positive offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_LD_IMM64(R3, 0x0000000000000088ULL), + BPF_STX_MEM(BPF_B, R1, R2, 256), + BPF_LDX_MEM(BPF_B, R0, R1, 256), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 512, 0 } }, + .stack_depth = 0, + }, + { + "BPF_LDX_MEM | BPF_B, large positive offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_LD_IMM64(R3, 0x0000000000000088ULL), + BPF_STX_MEM(BPF_B, R1, R2, 4096), + BPF_LDX_MEM(BPF_B, R0, R1, 4096), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 4096 + 16, 0 } }, + .stack_depth = 0, + }, + { + "BPF_LDX_MEM | BPF_H, base", .u.insns_int = { BPF_LD_IMM64(R1, 0x0102030405060708ULL), BPF_LD_IMM64(R2, 0x0000000000000708ULL), @@ -7918,7 +7968,72 @@ static struct bpf_test tests[] = { .stack_depth = 8, }, { - "BPF_LDX_MEM | BPF_W", + "BPF_LDX_MEM | BPF_H, negative offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_LD_IMM64(R3, 0x0000000000008788ULL), + BPF_ALU64_IMM(BPF_ADD, R1, 512), + BPF_STX_MEM(BPF_H, R1, R2, -256), + BPF_LDX_MEM(BPF_H, R0, R1, -256), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 512, 0 } }, + .stack_depth = 0, + }, + { + "BPF_LDX_MEM | BPF_H, small positive offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_LD_IMM64(R3, 0x0000000000008788ULL), + BPF_STX_MEM(BPF_H, R1, R2, 256), + BPF_LDX_MEM(BPF_H, R0, R1, 256), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 512, 0 } }, + .stack_depth = 0, + }, + { + "BPF_LDX_MEM | BPF_H, large positive offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_LD_IMM64(R3, 0x0000000000008788ULL), + BPF_STX_MEM(BPF_H, R1, R2, 8192), + BPF_LDX_MEM(BPF_H, R0, R1, 8192), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 8192 + 16, 0 } }, + .stack_depth = 0, + }, + { + "BPF_LDX_MEM | BPF_H, unaligned positive offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_LD_IMM64(R3, 0x0000000000008788ULL), + BPF_STX_MEM(BPF_H, R1, R2, 13), + BPF_LDX_MEM(BPF_H, R0, R1, 13), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 32, 0 } }, + .stack_depth = 0, + }, + { + "BPF_LDX_MEM | BPF_W, base", .u.insns_int = { BPF_LD_IMM64(R1, 0x0102030405060708ULL), 
BPF_LD_IMM64(R2, 0x0000000005060708ULL), @@ -7957,6 +8072,162 @@ static struct bpf_test tests[] = { { { 0, 0 } }, .stack_depth = 8, }, + { + "BPF_LDX_MEM | BPF_W, negative offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_LD_IMM64(R3, 0x0000000085868788ULL), + BPF_ALU64_IMM(BPF_ADD, R1, 512), + BPF_STX_MEM(BPF_W, R1, R2, -256), + BPF_LDX_MEM(BPF_W, R0, R1, -256), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 512, 0 } }, + .stack_depth = 0, + }, + { + "BPF_LDX_MEM | BPF_W, small positive offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_LD_IMM64(R3, 0x0000000085868788ULL), + BPF_STX_MEM(BPF_W, R1, R2, 256), + BPF_LDX_MEM(BPF_W, R0, R1, 256), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 512, 0 } }, + .stack_depth = 0, + }, + { + "BPF_LDX_MEM | BPF_W, large positive offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_LD_IMM64(R3, 0x0000000085868788ULL), + BPF_STX_MEM(BPF_W, R1, R2, 16384), + BPF_LDX_MEM(BPF_W, R0, R1, 16384), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 16384 + 16, 0 } }, + .stack_depth = 0, + }, + { + "BPF_LDX_MEM | BPF_W, unaligned positive offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_LD_IMM64(R3, 0x0000000085868788ULL), + BPF_STX_MEM(BPF_W, R1, R2, 13), + BPF_LDX_MEM(BPF_W, R0, R1, 13), + BPF_JMP_REG(BPF_JNE, R0, R3, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 32, 0 } }, + .stack_depth = 0, + }, + { + "BPF_LDX_MEM | BPF_DW, base", + .u.insns_int = { + BPF_LD_IMM64(R1, 0x0102030405060708ULL), + BPF_STX_MEM(BPF_DW, R10, R1, -8), + BPF_LDX_MEM(BPF_DW, R0, R10, -8), + BPF_JMP_REG(BPF_JNE, R0, R1, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0 } }, + .stack_depth = 8, + }, + { + "BPF_LDX_MEM | BPF_DW, MSB set", + .u.insns_int = { + BPF_LD_IMM64(R1, 0x8182838485868788ULL), + BPF_STX_MEM(BPF_DW, R10, R1, -8), + BPF_LDX_MEM(BPF_DW, R0, R10, -8), + BPF_JMP_REG(BPF_JNE, R0, R1, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0 } }, + .stack_depth = 8, + }, + { + "BPF_LDX_MEM | BPF_DW, negative offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_ALU64_IMM(BPF_ADD, R1, 512), + BPF_STX_MEM(BPF_DW, R1, R2, -256), + BPF_LDX_MEM(BPF_DW, R0, R1, -256), + BPF_JMP_REG(BPF_JNE, R0, R2, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 512, 0 } }, + .stack_depth = 0, + }, + { + "BPF_LDX_MEM | BPF_DW, small positive offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_STX_MEM(BPF_DW, R1, R2, 256), + BPF_LDX_MEM(BPF_DW, R0, R1, 256), + BPF_JMP_REG(BPF_JNE, R0, R2, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 512, 0 } }, + .stack_depth = 8, + }, + { + "BPF_LDX_MEM | BPF_DW, large positive offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_STX_MEM(BPF_DW, R1, R2, 32760), + BPF_LDX_MEM(BPF_DW, R0, R1, 32760), + BPF_JMP_REG(BPF_JNE, R0, R2, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 32768, 0 } }, + .stack_depth = 0, + }, + { + "BPF_LDX_MEM | 
BPF_DW, unaligned positive offset", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x8182838485868788ULL), + BPF_STX_MEM(BPF_DW, R1, R2, 13), + BPF_LDX_MEM(BPF_DW, R0, R1, 13), + BPF_JMP_REG(BPF_JNE, R0, R2, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL | FLAG_LARGE_MEM, + { }, + { { 32, 0 } }, + .stack_depth = 0, + }, /* BPF_STX_MEM B/H/W/DW */ { "BPF_STX_MEM | BPF_B", @@ -14094,6 +14365,9 @@ static void *generate_test_data(struct bpf_test *test, int sub) if (test->aux & FLAG_NO_DATA) return NULL; + if (test->aux & FLAG_LARGE_MEM) + return kmalloc(test->test[sub].data_size, GFP_KERNEL); + /* Test case expects an skb, so populate one. Various * subtests generate skbs of different sizes based on * the same data. @@ -14137,7 +14411,10 @@ static void release_test_data(const struct bpf_test *test, void *data) if (test->aux & FLAG_NO_DATA) return; - kfree_skb(data); + if (test->aux & FLAG_LARGE_MEM) + kfree(data); + else + kfree_skb(data); } static int filter_length(int which) @@ -14674,6 +14951,36 @@ static struct tail_call_test tail_call_tests[] = { .result = 10, }, { + "Tail call load/store leaf", + .insns = { + BPF_ALU64_IMM(BPF_MOV, R1, 1), + BPF_ALU64_IMM(BPF_MOV, R2, 2), + BPF_ALU64_REG(BPF_MOV, R3, BPF_REG_FP), + BPF_STX_MEM(BPF_DW, R3, R1, -8), + BPF_STX_MEM(BPF_DW, R3, R2, -16), + BPF_LDX_MEM(BPF_DW, R0, BPF_REG_FP, -8), + BPF_JMP_REG(BPF_JNE, R0, R1, 3), + BPF_LDX_MEM(BPF_DW, R0, BPF_REG_FP, -16), + BPF_JMP_REG(BPF_JNE, R0, R2, 1), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + .result = 0, + .stack_depth = 32, + }, + { + "Tail call load/store", + .insns = { + BPF_ALU64_IMM(BPF_MOV, R0, 3), + BPF_STX_MEM(BPF_DW, BPF_REG_FP, R0, -8), + TAIL_CALL(-1), + BPF_ALU64_IMM(BPF_MOV, R0, -1), + BPF_EXIT_INSN(), + }, + .result = 0, + .stack_depth = 16, + }, + { "Tail call error path, max count reached", .insns = { BPF_LDX_MEM(BPF_W, R2, R1, 0), diff --git a/lib/test_firmware.c b/lib/test_firmware.c index 1bccd6cd5f48..c82b65947ce6 100644 --- a/lib/test_firmware.c +++ b/lib/test_firmware.c @@ -31,9 +31,12 @@ MODULE_IMPORT_NS(TEST_FIRMWARE); #define TEST_FIRMWARE_NAME "test-firmware.bin" #define TEST_FIRMWARE_NUM_REQS 4 #define TEST_FIRMWARE_BUF_SIZE SZ_1K +#define TEST_UPLOAD_MAX_SIZE SZ_2K +#define TEST_UPLOAD_BLK_SIZE 37 /* Avoid powers of two in testing */ static DEFINE_MUTEX(test_fw_mutex); static const struct firmware *test_firmware; +static LIST_HEAD(test_upload_list); struct test_batched_req { u8 idx; @@ -63,6 +66,7 @@ struct test_batched_req { * @reqs: stores all requests information * @read_fw_idx: index of thread from which we want to read firmware results * from through the read_fw trigger. + * @upload_name: firmware name to be used with upload_read sysfs node * @test_result: a test may use this to collect the result from the call * of the request_firmware*() calls used in their tests. In order of * priority we always keep first any setup error. 
If no setup errors were @@ -101,6 +105,7 @@ struct test_config { bool send_uevent; u8 num_requests; u8 read_fw_idx; + char *upload_name; /* * These below don't belong her but we'll move them once we create @@ -112,8 +117,34 @@ struct test_config { struct device *device); }; +struct upload_inject_err { + const char *prog; + enum fw_upload_err err_code; +}; + +struct test_firmware_upload { + char *name; + struct list_head node; + char *buf; + size_t size; + bool cancel_request; + struct upload_inject_err inject; + struct fw_upload *fwl; +}; + static struct test_config *test_fw_config; +static struct test_firmware_upload *upload_lookup_name(const char *name) +{ + struct test_firmware_upload *tst; + + list_for_each_entry(tst, &test_upload_list, node) + if (strncmp(name, tst->name, strlen(tst->name)) == 0) + return tst; + + return NULL; +} + static ssize_t test_fw_misc_read(struct file *f, char __user *buf, size_t size, loff_t *offset) { @@ -198,6 +229,7 @@ static int __test_firmware_config_init(void) test_fw_config->req_firmware = request_firmware; test_fw_config->test_result = 0; test_fw_config->reqs = NULL; + test_fw_config->upload_name = NULL; return 0; @@ -277,6 +309,13 @@ static ssize_t config_show(struct device *dev, test_fw_config->sync_direct ? "true" : "false"); len += scnprintf(buf + len, PAGE_SIZE - len, "read_fw_idx:\t%u\n", test_fw_config->read_fw_idx); + if (test_fw_config->upload_name) + len += scnprintf(buf + len, PAGE_SIZE - len, + "upload_name:\t%s\n", + test_fw_config->upload_name); + else + len += scnprintf(buf + len, PAGE_SIZE - len, + "upload_name:\tEMTPY\n"); mutex_unlock(&test_fw_mutex); @@ -392,6 +431,32 @@ static ssize_t config_name_show(struct device *dev, } static DEVICE_ATTR_RW(config_name); +static ssize_t config_upload_name_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct test_firmware_upload *tst; + int ret = count; + + mutex_lock(&test_fw_mutex); + tst = upload_lookup_name(buf); + if (tst) + test_fw_config->upload_name = tst->name; + else + ret = -EINVAL; + mutex_unlock(&test_fw_mutex); + + return ret; +} + +static ssize_t config_upload_name_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return config_test_show_str(buf, test_fw_config->upload_name); +} +static DEVICE_ATTR_RW(config_upload_name); + static ssize_t config_num_requests_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -989,6 +1054,278 @@ out: } static DEVICE_ATTR_WO(trigger_batched_requests_async); +static void upload_release(struct test_firmware_upload *tst) +{ + firmware_upload_unregister(tst->fwl); + kfree(tst->buf); + kfree(tst->name); + kfree(tst); +} + +static void upload_release_all(void) +{ + struct test_firmware_upload *tst, *tmp; + + list_for_each_entry_safe(tst, tmp, &test_upload_list, node) { + list_del(&tst->node); + upload_release(tst); + } + test_fw_config->upload_name = NULL; +} + +/* + * This table is replicated from .../firmware_loader/sysfs_upload.c + * and needs to be kept in sync. 
+ */ +static const char * const fw_upload_err_str[] = { + [FW_UPLOAD_ERR_NONE] = "none", + [FW_UPLOAD_ERR_HW_ERROR] = "hw-error", + [FW_UPLOAD_ERR_TIMEOUT] = "timeout", + [FW_UPLOAD_ERR_CANCELED] = "user-abort", + [FW_UPLOAD_ERR_BUSY] = "device-busy", + [FW_UPLOAD_ERR_INVALID_SIZE] = "invalid-file-size", + [FW_UPLOAD_ERR_RW_ERROR] = "read-write-error", + [FW_UPLOAD_ERR_WEAROUT] = "flash-wearout", +}; + +static void upload_err_inject_error(struct test_firmware_upload *tst, + const u8 *p, const char *prog) +{ + enum fw_upload_err err; + + for (err = FW_UPLOAD_ERR_NONE + 1; err < FW_UPLOAD_ERR_MAX; err++) { + if (strncmp(p, fw_upload_err_str[err], + strlen(fw_upload_err_str[err])) == 0) { + tst->inject.prog = prog; + tst->inject.err_code = err; + return; + } + } +} + +static void upload_err_inject_prog(struct test_firmware_upload *tst, + const u8 *p) +{ + static const char * const progs[] = { + "preparing:", "transferring:", "programming:" + }; + int i; + + for (i = 0; i < ARRAY_SIZE(progs); i++) { + if (strncmp(p, progs[i], strlen(progs[i])) == 0) { + upload_err_inject_error(tst, p + strlen(progs[i]), + progs[i]); + return; + } + } +} + +#define FIVE_MINUTES_MS (5 * 60 * 1000) +static enum fw_upload_err +fw_upload_wait_on_cancel(struct test_firmware_upload *tst) +{ + int ms_delay; + + for (ms_delay = 0; ms_delay < FIVE_MINUTES_MS; ms_delay += 100) { + msleep(100); + if (tst->cancel_request) + return FW_UPLOAD_ERR_CANCELED; + } + return FW_UPLOAD_ERR_NONE; +} + +static enum fw_upload_err test_fw_upload_prepare(struct fw_upload *fwl, + const u8 *data, u32 size) +{ + struct test_firmware_upload *tst = fwl->dd_handle; + enum fw_upload_err ret = FW_UPLOAD_ERR_NONE; + const char *progress = "preparing:"; + + tst->cancel_request = false; + + if (!size || size > TEST_UPLOAD_MAX_SIZE) { + ret = FW_UPLOAD_ERR_INVALID_SIZE; + goto err_out; + } + + if (strncmp(data, "inject:", strlen("inject:")) == 0) + upload_err_inject_prog(tst, data + strlen("inject:")); + + memset(tst->buf, 0, TEST_UPLOAD_MAX_SIZE); + tst->size = size; + + if (tst->inject.err_code == FW_UPLOAD_ERR_NONE || + strncmp(tst->inject.prog, progress, strlen(progress)) != 0) + return FW_UPLOAD_ERR_NONE; + + if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED) + ret = fw_upload_wait_on_cancel(tst); + else + ret = tst->inject.err_code; + +err_out: + /* + * The cleanup op only executes if the prepare op succeeds. + * If the prepare op fails, it must do it's own clean-up. 
+ */ + tst->inject.err_code = FW_UPLOAD_ERR_NONE; + tst->inject.prog = NULL; + + return ret; +} + +static enum fw_upload_err test_fw_upload_write(struct fw_upload *fwl, + const u8 *data, u32 offset, + u32 size, u32 *written) +{ + struct test_firmware_upload *tst = fwl->dd_handle; + const char *progress = "transferring:"; + u32 blk_size; + + if (tst->cancel_request) + return FW_UPLOAD_ERR_CANCELED; + + blk_size = min_t(u32, TEST_UPLOAD_BLK_SIZE, size); + memcpy(tst->buf + offset, data + offset, blk_size); + + *written = blk_size; + + if (tst->inject.err_code == FW_UPLOAD_ERR_NONE || + strncmp(tst->inject.prog, progress, strlen(progress)) != 0) + return FW_UPLOAD_ERR_NONE; + + if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED) + return fw_upload_wait_on_cancel(tst); + + return tst->inject.err_code; +} + +static enum fw_upload_err test_fw_upload_complete(struct fw_upload *fwl) +{ + struct test_firmware_upload *tst = fwl->dd_handle; + const char *progress = "programming:"; + + if (tst->cancel_request) + return FW_UPLOAD_ERR_CANCELED; + + if (tst->inject.err_code == FW_UPLOAD_ERR_NONE || + strncmp(tst->inject.prog, progress, strlen(progress)) != 0) + return FW_UPLOAD_ERR_NONE; + + if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED) + return fw_upload_wait_on_cancel(tst); + + return tst->inject.err_code; +} + +static void test_fw_upload_cancel(struct fw_upload *fwl) +{ + struct test_firmware_upload *tst = fwl->dd_handle; + + tst->cancel_request = true; +} + +static void test_fw_cleanup(struct fw_upload *fwl) +{ + struct test_firmware_upload *tst = fwl->dd_handle; + + tst->inject.err_code = FW_UPLOAD_ERR_NONE; + tst->inject.prog = NULL; +} + +static const struct fw_upload_ops upload_test_ops = { + .prepare = test_fw_upload_prepare, + .write = test_fw_upload_write, + .poll_complete = test_fw_upload_complete, + .cancel = test_fw_upload_cancel, + .cleanup = test_fw_cleanup +}; + +static ssize_t upload_register_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct test_firmware_upload *tst; + struct fw_upload *fwl; + char *name; + int ret; + + name = kstrndup(buf, count, GFP_KERNEL); + if (!name) + return -ENOMEM; + + mutex_lock(&test_fw_mutex); + tst = upload_lookup_name(name); + if (tst) { + ret = -EEXIST; + goto free_name; + } + + tst = kzalloc(sizeof(*tst), GFP_KERNEL); + if (!tst) { + ret = -ENOMEM; + goto free_name; + } + + tst->name = name; + tst->buf = kzalloc(TEST_UPLOAD_MAX_SIZE, GFP_KERNEL); + if (!tst->buf) { + ret = -ENOMEM; + goto free_tst; + } + + fwl = firmware_upload_register(THIS_MODULE, dev, tst->name, + &upload_test_ops, tst); + if (IS_ERR(fwl)) { + ret = PTR_ERR(fwl); + goto free_buf; + } + + tst->fwl = fwl; + list_add_tail(&tst->node, &test_upload_list); + mutex_unlock(&test_fw_mutex); + return count; + +free_buf: + kfree(tst->buf); + +free_tst: + kfree(tst); + +free_name: + mutex_unlock(&test_fw_mutex); + kfree(name); + + return ret; +} +static DEVICE_ATTR_WO(upload_register); + +static ssize_t upload_unregister_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct test_firmware_upload *tst; + int ret = count; + + mutex_lock(&test_fw_mutex); + tst = upload_lookup_name(buf); + if (!tst) { + ret = -EINVAL; + goto out; + } + + if (test_fw_config->upload_name == tst->name) + test_fw_config->upload_name = NULL; + + list_del(&tst->node); + upload_release(tst); + +out: + mutex_unlock(&test_fw_mutex); + return ret; +} +static DEVICE_ATTR_WO(upload_unregister); + static ssize_t 
test_result_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1051,6 +1388,45 @@ out: } static DEVICE_ATTR_RO(read_firmware); +static ssize_t upload_read_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct test_firmware_upload *tst = NULL; + struct test_firmware_upload *tst_iter; + int ret = -EINVAL; + + if (!test_fw_config->upload_name) { + pr_err("Set config_upload_name before using upload_read\n"); + return -EINVAL; + } + + mutex_lock(&test_fw_mutex); + list_for_each_entry(tst_iter, &test_upload_list, node) + if (tst_iter->name == test_fw_config->upload_name) { + tst = tst_iter; + break; + } + + if (!tst) { + pr_err("Firmware name not found: %s\n", + test_fw_config->upload_name); + goto out; + } + + if (tst->size > PAGE_SIZE) { + pr_err("Testing interface must use PAGE_SIZE firmware for now\n"); + goto out; + } + + memcpy(buf, tst->buf, tst->size); + ret = tst->size; +out: + mutex_unlock(&test_fw_mutex); + return ret; +} +static DEVICE_ATTR_RO(upload_read); + #define TEST_FW_DEV_ATTR(name) &dev_attr_##name.attr static struct attribute *test_dev_attrs[] = { @@ -1066,6 +1442,7 @@ static struct attribute *test_dev_attrs[] = { TEST_FW_DEV_ATTR(config_sync_direct), TEST_FW_DEV_ATTR(config_send_uevent), TEST_FW_DEV_ATTR(config_read_fw_idx), + TEST_FW_DEV_ATTR(config_upload_name), /* These don't use the config at all - they could be ported! */ TEST_FW_DEV_ATTR(trigger_request), @@ -1082,6 +1459,9 @@ static struct attribute *test_dev_attrs[] = { TEST_FW_DEV_ATTR(release_all_firmware), TEST_FW_DEV_ATTR(test_result), TEST_FW_DEV_ATTR(read_firmware), + TEST_FW_DEV_ATTR(upload_read), + TEST_FW_DEV_ATTR(upload_register), + TEST_FW_DEV_ATTR(upload_unregister), NULL, }; @@ -1128,6 +1508,7 @@ static void __exit test_firmware_exit(void) mutex_lock(&test_fw_mutex); release_firmware(test_firmware); misc_deregister(&test_fw_misc_device); + upload_release_all(); __test_firmware_config_free(); kfree(test_fw_config); mutex_unlock(&test_fw_mutex); diff --git a/lib/test_kasan.c b/lib/test_kasan.c index ad880231dfa8..c233b1a4e984 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -391,7 +391,7 @@ static void krealloc_uaf(struct kunit *test) kfree(ptr1); KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL)); - KUNIT_ASSERT_PTR_EQ(test, (void *)ptr2, NULL); + KUNIT_ASSERT_NULL(test, ptr2); KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1); } diff --git a/lib/test_meminit.c b/lib/test_meminit.c index 3ca717f11397..c95db11a6906 100644 --- a/lib/test_meminit.c +++ b/lib/test_meminit.c @@ -279,13 +279,18 @@ static int __init do_kmem_cache_rcu_persistent(int size, int *total_failures) c = kmem_cache_create("test_cache", size, size, SLAB_TYPESAFE_BY_RCU, NULL); buf = kmem_cache_alloc(c, GFP_KERNEL); + if (!buf) + goto out; saved_ptr = buf; fill_with_garbage(buf, size); buf_contents = kmalloc(size, GFP_KERNEL); - if (!buf_contents) + if (!buf_contents) { + kmem_cache_free(c, buf); goto out; + } used_objects = kmalloc_array(maxiter, sizeof(void *), GFP_KERNEL); if (!used_objects) { + kmem_cache_free(c, buf); kfree(buf_contents); goto out; } @@ -306,11 +311,14 @@ static int __init do_kmem_cache_rcu_persistent(int size, int *total_failures) } } + for (iter = 0; iter < maxiter; iter++) + kmem_cache_free(c, used_objects[iter]); + free_out: - kmem_cache_destroy(c); kfree(buf_contents); kfree(used_objects); out: + kmem_cache_destroy(c); *total_failures += fail; return 1; } diff --git a/lib/test_siphash.c b/lib/test_siphash.c index 
a6d854d933bf..a96788d0141d 100644 --- a/lib/test_siphash.c +++ b/lib/test_siphash.c @@ -1,8 +1,7 @@ -/* Test cases for siphash.c +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* Copyright (C) 2016-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. * - * Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. - * - * This file is provided under a dual BSD/GPLv2 license. + * Test cases for siphash.c * * SipHash: a fast short-input PRF * https://131002.net/siphash/ diff --git a/lib/test_string.c b/lib/test_string.c index 9dfd6f52de92..c5cb92fb710e 100644 --- a/lib/test_string.c +++ b/lib/test_string.c @@ -179,6 +179,34 @@ static __init int strnchr_selftest(void) return 0; } +static __init int strspn_selftest(void) +{ + static const struct strspn_test { + const char str[16]; + const char accept[16]; + const char reject[16]; + unsigned a; + unsigned r; + } tests[] __initconst = { + { "foobar", "", "", 0, 6 }, + { "abba", "abc", "ABBA", 4, 4 }, + { "abba", "a", "b", 1, 1 }, + { "", "abc", "abc", 0, 0}, + }; + const struct strspn_test *s = tests; + size_t i, res; + + for (i = 0; i < ARRAY_SIZE(tests); ++i, ++s) { + res = strspn(s->str, s->accept); + if (res != s->a) + return 0x100 + 2*i; + res = strcspn(s->str, s->reject); + if (res != s->r) + return 0x100 + 2*i + 1; + } + return 0; +} + static __exit void string_selftest_remove(void) { } @@ -212,6 +240,11 @@ static __init int string_selftest_init(void) if (subtest) goto fail; + test = 6; + subtest = strspn_selftest(); + if (subtest) + goto fail; + pr_info("String selftests succeeded\n"); return 0; fail: diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c index a5a3d6c27e1f..9a564971f539 100644 --- a/lib/test_sysctl.c +++ b/lib/test_sysctl.c @@ -38,6 +38,7 @@ static int i_zero; static int i_one_hundred = 100; +static int match_int_ok = 1; struct test_sysctl_data { int int_0001; @@ -96,6 +97,13 @@ static struct ctl_table test_table[] = { .proc_handler = proc_dointvec, }, { + .procname = "match_int", + .data = &match_int_ok, + .maxlen = sizeof(match_int_ok), + .mode = 0444, + .proc_handler = proc_dointvec, + }, + { .procname = "boot_int", .data = &test_data.boot_int, .maxlen = sizeof(test_data.boot_int), @@ -132,6 +140,30 @@ static struct ctl_table_header *test_sysctl_header; static int __init test_sysctl_init(void) { + int i; + + struct { + int defined; + int wanted; + } match_int[] = { + {.defined = *(int *)SYSCTL_ZERO, .wanted = 0}, + {.defined = *(int *)SYSCTL_ONE, .wanted = 1}, + {.defined = *(int *)SYSCTL_TWO, .wanted = 2}, + {.defined = *(int *)SYSCTL_THREE, .wanted = 3}, + {.defined = *(int *)SYSCTL_FOUR, .wanted = 4}, + {.defined = *(int *)SYSCTL_ONE_HUNDRED, .wanted = 100}, + {.defined = *(int *)SYSCTL_TWO_HUNDRED, .wanted = 200}, + {.defined = *(int *)SYSCTL_ONE_THOUSAND, .wanted = 1000}, + {.defined = *(int *)SYSCTL_THREE_THOUSAND, .wanted = 3000}, + {.defined = *(int *)SYSCTL_INT_MAX, .wanted = INT_MAX}, + {.defined = *(int *)SYSCTL_MAXOLDUID, .wanted = 65535}, + {.defined = *(int *)SYSCTL_NEG_ONE, .wanted = -1}, + }; + + for (i = 0; i < ARRAY_SIZE(match_int); i++) + if (match_int[i].defined != match_int[i].wanted) + match_int_ok = 0; + test_data.bitmap_0001 = kzalloc(SYSCTL_TEST_BITMAP_SIZE/8, GFP_KERNEL); if (!test_data.bitmap_0001) return -ENOMEM; diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 40d26a07a133..3c1853a9d1c0 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -750,61 +750,37 @@ static int __init debug_boot_weak_hash_enable(char *str) } 
early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable); -static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key); -static siphash_key_t ptr_key __read_mostly; +static DEFINE_STATIC_KEY_FALSE(filled_random_ptr_key); static void enable_ptr_key_workfn(struct work_struct *work) { - get_random_bytes(&ptr_key, sizeof(ptr_key)); - /* Needs to run from preemptible context */ - static_branch_disable(¬_filled_random_ptr_key); + static_branch_enable(&filled_random_ptr_key); } -static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn); - -static int fill_random_ptr_key(struct notifier_block *nb, - unsigned long action, void *data) +/* Maps a pointer to a 32 bit unique identifier. */ +static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out) { - /* This may be in an interrupt handler. */ - queue_work(system_unbound_wq, &enable_ptr_key_work); - return 0; -} - -static struct notifier_block random_ready = { - .notifier_call = fill_random_ptr_key -}; + static siphash_key_t ptr_key __read_mostly; + unsigned long hashval; -static int __init initialize_ptr_random(void) -{ - int key_size = sizeof(ptr_key); - int ret; + if (!static_branch_likely(&filled_random_ptr_key)) { + static bool filled = false; + static DEFINE_SPINLOCK(filling); + static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn); + unsigned long flags; - /* Use hw RNG if available. */ - if (get_random_bytes_arch(&ptr_key, key_size) == key_size) { - static_branch_disable(¬_filled_random_ptr_key); - return 0; - } + if (!system_unbound_wq || !rng_is_initialized() || + !spin_trylock_irqsave(&filling, flags)) + return -EAGAIN; - ret = register_random_ready_notifier(&random_ready); - if (!ret) { - return 0; - } else if (ret == -EALREADY) { - /* This is in preemptible context */ - enable_ptr_key_workfn(&enable_ptr_key_work); - return 0; + if (!filled) { + get_random_bytes(&ptr_key, sizeof(ptr_key)); + queue_work(system_unbound_wq, &enable_ptr_key_work); + filled = true; + } + spin_unlock_irqrestore(&filling, flags); } - return ret; -} -early_initcall(initialize_ptr_random); - -/* Maps a pointer to a 32 bit unique identifier. */ -static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out) -{ - unsigned long hashval; - - if (static_branch_unlikely(¬_filled_random_ptr_key)) - return -EAGAIN; #ifdef CONFIG_64BIT hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key); diff --git a/lib/xarray.c b/lib/xarray.c index 54e646e8e6ee..ea9ce1f0b386 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -264,9 +264,10 @@ static void xa_node_free(struct xa_node *node) * xas_destroy() - Free any resources allocated during the XArray operation. * @xas: XArray operation state. * - * This function is now internal-only. + * Most users will not need to call this function; it is called for you + * by xas_nomem(). */ -static void xas_destroy(struct xa_state *xas) +void xas_destroy(struct xa_state *xas) { struct xa_node *next, *node = xas->xa_alloc; |

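The test_firmware upload support added above lets a test payload start with "inject:<phase>:<error>" to request that a given upload phase fail with a given fw_upload_err code; upload_err_inject_prog() and upload_err_inject_error() do the prefix matching against small string tables. Below is a standalone sketch of that parsing scheme, assuming the phase and error strings mirror the tables shown in the diff; parse_inject() and its integer outputs are illustrative names, not part of the kernel's firmware-upload API:

#include <stdio.h>
#include <string.h>

/* Sketch of the "inject:<phase>:<error>" prefix matching used by the
 * test_firmware upload hooks above; tables copied from the diff. */
static const char * const phases[] = { "preparing:", "transferring:", "programming:" };
static const char * const errors[] = {
	"none", "hw-error", "timeout", "user-abort", "device-busy",
	"invalid-file-size", "read-write-error", "flash-wearout",
};

/* Returns 0 and fills *phase/*err on a match, -1 otherwise. */
static int parse_inject(const char *data, int *phase, int *err)
{
	size_t i, j;

	if (strncmp(data, "inject:", strlen("inject:")) != 0)
		return -1;
	data += strlen("inject:");

	for (i = 0; i < sizeof(phases) / sizeof(phases[0]); i++) {
		if (strncmp(data, phases[i], strlen(phases[i])) != 0)
			continue;
		data += strlen(phases[i]);
		/* Skip index 0 ("none"), as the kernel loop starts past it. */
		for (j = 1; j < sizeof(errors) / sizeof(errors[0]); j++) {
			if (strncmp(data, errors[j], strlen(errors[j])) == 0) {
				*phase = (int)i;
				*err = (int)j;
				return 0;
			}
		}
	}
	return -1;
}

int main(void)
{
	int phase, err;

	if (parse_inject("inject:transferring:timeout", &phase, &err) == 0)
		printf("fail phase %s with %s\n", phases[phase], errors[err]);
	return 0;
}

A payload such as "inject:transferring:timeout" thus selects the write step, which matches how test_fw_upload_write() above consults tst->inject before returning an error.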
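The vsprintf.c change above drops the random-readiness notifier and seeds the %p hashing key lazily inside __ptr_to_hashval(): when the key is not yet filled, the function bails out with -EAGAIN unless the RNG is ready and a trylock on a local spinlock succeeds, in which case it fills the key once and flips a static branch for later callers. The sketch below mimics only that control flow in userspace; splitmix64 stands in for siphash, pthread_mutex_trylock() for the spinlock, rand() for get_random_bytes(), and a plain flag for the static branch, so none of it is the kernel implementation:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t filling = PTHREAD_MUTEX_INITIALIZER;
static bool key_filled;		/* stand-in for the static branch */
static uint64_t ptr_key;

/* Stand-in for rng_is_initialized(); assume entropy is available. */
static bool rng_ready(void) { return true; }

/* splitmix64-style mixer standing in for siphash_1u64(). */
static uint64_t mix64(uint64_t x)
{
	x ^= x >> 30; x *= 0xbf58476d1ce4e5b9ULL;
	x ^= x >> 27; x *= 0x94d049bb133111ebULL;
	return x ^ (x >> 31);
}

/* Returns 0 and stores a hashed identifier, or -1 ("try again later")
 * when the key cannot be set up yet. */
static int ptr_to_hashval(const void *ptr, uint64_t *out)
{
	if (!key_filled) {
		if (!rng_ready() || pthread_mutex_trylock(&filling) != 0)
			return -1;
		if (!key_filled) {
			ptr_key = ((uint64_t)rand() << 32) | (uint64_t)rand();
			key_filled = true;
		}
		pthread_mutex_unlock(&filling);
	}
	*out = mix64((uint64_t)(uintptr_t)ptr ^ ptr_key);
	return 0;
}

int main(void)
{
	uint64_t h;
	int x;

	if (ptr_to_hashval(&x, &h) == 0)
		printf("%p -> %016llx\n", (void *)&x, (unsigned long long)h);
	return 0;
}

Returning "try again" instead of waiting mirrors why the kernel version uses spin_trylock_irqsave() rather than blocking: pointer hashing can be requested from contexts that must not sleep or spin on the lock.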