Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                                 6
-rw-r--r--  lib/Kconfig.debug                         165
-rw-r--r--  lib/Kconfig.kasan                           7
-rw-r--r--  lib/Makefile                                7
-rw-r--r--  lib/alloc_tag.c                           515
-rw-r--r--  lib/codetag.c                             104
-rw-r--r--  lib/crc16_kunit.c                         155
-rw-r--r--  lib/list-test.c                             4
-rw-r--r--  lib/list_sort.c                             3
-rw-r--r--  lib/maple_tree.c                          249
-rw-r--r--  lib/min_heap.c                             70
-rw-r--r--  lib/overflow_kunit.c                        2
-rw-r--r--  lib/percpu_test.c                          11
-rw-r--r--  lib/scatterlist.c                           4
-rw-r--r--  lib/slub_kunit.c                           42
-rw-r--r--  lib/string_helpers.c                        2
-rw-r--r--  lib/strncpy_from_user.c                     5
-rw-r--r--  lib/test_maple_tree.c                      90
-rw-r--r--  lib/test_min_heap.c                        16
-rw-r--r--  lib/tests/Makefile                          1
-rw-r--r--  lib/tests/module/.gitignore                 4
-rw-r--r--  lib/tests/module/Makefile                  15
-rwxr-xr-x  lib/tests/module/gen_test_kallsyms.sh     129
-rw-r--r--  lib/util_macros_kunit.c                   240
24 files changed, 1638 insertions(+), 208 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 50d85f38b569..5a318f753b2f 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -789,3 +789,9 @@ config POLYNOMIAL
config FIRMWARE_TABLE
bool
+
+config UNION_FIND
+ bool
+
+config MIN_HEAP
+ bool
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index a2fe6d5cfbd2..f340017585c5 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -993,6 +993,7 @@ config CODE_TAGGING
config MEM_ALLOC_PROFILING
bool "Enable memory allocation profiling"
default n
+ depends on MMU
depends on PROC_FS
depends on !DEBUG_FORCE_WEAK_PER_CPU
select CODE_TAGGING
@@ -2268,6 +2269,7 @@ config TEST_LIST_SORT
config TEST_MIN_HEAP
tristate "Min heap test"
depends on DEBUG_KERNEL || m
+ select MIN_HEAP
help
Enable this to turn on min heap function tests. This test is
executed only once during system boot (so affects only boot time),
@@ -2618,6 +2620,23 @@ config CHECKSUM_KUNIT
If unsure, say N.
+config UTIL_MACROS_KUNIT
+ tristate "KUnit test util_macros.h functions at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to test the util_macros.h functions at boot.
+
+ KUnit tests run during boot and output the results to the debug log
+ in TAP format (http://testanything.org/). Only useful for kernel devs
+ running the KUnit test harness, and not intended for inclusion into a
+ production build.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
config HASH_KUNIT_TEST
tristate "KUnit Test for integer hash functions" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -2839,6 +2858,15 @@ config USERCOPY_KUNIT_TEST
on the copy_to/from_user infrastructure, making sure basic
user/kernel boundary testing is working.
+config CRC16_KUNIT_TEST
+ tristate "KUnit tests for CRC16"
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ select CRC16
+ help
+ Enable this option to run unit tests for the kernel's CRC16
+ implementation (<linux/crc16.h>).
+
config TEST_UDELAY
tristate "udelay test driver"
help
@@ -2892,6 +2920,111 @@ config TEST_KMOD
If unsure, say N.
+config TEST_RUNTIME
+ bool
+
+config TEST_RUNTIME_MODULE
+ bool
+
+config TEST_KALLSYMS
+ tristate "module kallsyms find_symbol() test"
+ depends on m
+ select TEST_RUNTIME
+ select TEST_RUNTIME_MODULE
+ select TEST_KALLSYMS_A
+ select TEST_KALLSYMS_B
+ select TEST_KALLSYMS_C
+ select TEST_KALLSYMS_D
+ help
+ This allows us to stress test find_symbol() through the kallsyms
+ infrastructure used to place symbols in the kernel ELF kallsyms and the
+ module kallsyms, where we place kernel symbols such as exported symbols.
+
+ We have four test modules:
+
+ A: has KALLSYMS_NUMSYMS exported symbols
+ B: uses one of A's symbols
+ C: adds KALLSYMS_SCALE_FACTOR * KALLSYMS_NUMSYMS exported symbols
+ D: adds 2 * the symbols of C
+
+ We stress test find_symbol() through two means:
+
+ 1) Upon load of B it will trigger simplify_symbols() to look for the
+ one symbol it uses from module A, which has tons of symbols. This is
+ an indirect way for us to have B call resolve_symbol_wait() upon
+ module load. This will eventually call find_symbol(), which will
+ eventually try to find the symbols used with
+ find_exported_symbol_in_section(). find_exported_symbol_in_section()
+ uses bsearch(), so a binary search is done for each symbol. Binary
+ search is at worst O(log(n)), so the larger TEST_KALLSYMS_NUMSYMS the
+ longer the search.
+
+ 2) The selftests should load C first, before B. Upon B's load, towards
+ the end right before we call module B's init routine, we get
+ complete_formation() called on the module. That will first check
+ for duplicate symbols with the call to verify_exported_symbols().
+ That is when we'll force iteration over module C's huge symbol list.
+ Since C has KALLSYMS_SCALE_FACTOR * KALLSYMS_NUMSYMS symbols we can
+ first test just loading B without C. The amount of time it takes to
+ load C vs B gives us an idea of the impact of growing the symbol
+ space and lets us make projections. Module B only uses one symbol
+ from A so that the scaling in module C stays proportional; if it
+ used more symbols, the first test would be doing more work and
+ increasing just the search space would measure something slightly
+ different. The last module, module D, just increases the search
+ space by twice the number of symbols in C, to allow for fuller
+ projections.
+
+ tools/testing/selftests/module/find_symbol.sh
+
+ The current defaults will incur a build delay of about 7 minutes
+ on an x86_64 with only 8 cores. Enable this only if you want to
+ stress test find_symbol() with thousands of symbols. At the same
+ time this is also useful to test building modules with thousands of
+ symbols, and if BTF is enabled this also stress tests adding BTF
+ information for each module. Currently enabling many more symbols
+ will segfault the build system.
+
+ If unsure, say N.
+
+if TEST_KALLSYMS
+
+config TEST_KALLSYMS_A
+ tristate
+ depends on m
+
+config TEST_KALLSYMS_B
+ tristate
+ depends on m
+
+config TEST_KALLSYMS_C
+ tristate
+ depends on m
+
+config TEST_KALLSYMS_D
+ tristate
+ depends on m
+
+config TEST_KALLSYMS_NUMSYMS
+ int "test kallsyms number of symbols"
+ default 100
+ help
+ The number of symbols to create in TEST_KALLSYMS_A, only one of which
+ module TEST_KALLSYMS_B will use. This is also used to determine
+ how many symbols TEST_KALLSYMS_C will have, scaled up by
+ TEST_KALLSYMS_SCALE_FACTOR. Note that setting this to 10,000 will
+ trigger a segfault today, so don't use anything close to it unless
+ you are aware that this should not be used for automated build tests.
+
+config TEST_KALLSYMS_SCALE_FACTOR
+ int "test kallsyms scale factor"
+ default 8
+ help
+ How many more unused symbols TEST_KALLSYMS_C will have than
+ TEST_KALLSYMS_A. If 8, then module C will have 8 * the symbols
+ of module A. TEST_KALLSYMS_D will then have double the number of
+ symbols of C, so as to allow projections.
+
+endif # TEST_KALLSYMS
+
config TEST_DEBUG_VIRTUAL
tristate "Test CONFIG_DEBUG_VIRTUAL feature"
depends on DEBUG_VIRTUAL
@@ -2982,6 +3115,22 @@ config TEST_OBJPOOL
If unsure, say N.
+config INT_POW_TEST
+ tristate "Integer exponentiation (int_pow) test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ This option enables the KUnit test suite for the int_pow function,
+ which performs integer exponentiation. The test suite is designed to
+ verify that the implementation of int_pow correctly computes the power
+ of a given base raised to a given exponent.
+
+ Enabling this option will include tests that check various scenarios
+ and edge cases to ensure the accuracy and reliability of the exponentiation
+ function.
+
+ If unsure, say N
+
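[Editor's note] As a quick illustration of the operation INT_POW_TEST exercises, here is a stand-alone user-space C sketch (not part of the patch) of exponentiation by squaring, the usual way an int_pow()-style helper is written, together with the kind of edge cases such a suite checks. The helper name pow_u64 is made up for the example.

#include <assert.h>
#include <stdint.h>

/* Exponentiation by squaring: O(log(exp)) multiplications. */
static uint64_t pow_u64(uint64_t base, unsigned int exp)
{
	uint64_t result = 1;

	while (exp) {
		if (exp & 1)		/* multiply in the current bit */
			result *= base;
		exp >>= 1;
		base *= base;		/* square for the next bit */
	}
	return result;
}

int main(void)
{
	assert(pow_u64(2, 10) == 1024);	/* typical case */
	assert(pow_u64(7, 0) == 1);	/* edge case: zero exponent */
	assert(pow_u64(0, 5) == 0);	/* edge case: zero base */
	return 0;
}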
endif # RUNTIME_TESTING_MENU
config ARCH_USE_MEMTEST
@@ -3077,19 +3226,3 @@ config RUST_KERNEL_DOCTESTS
endmenu # "Rust"
endmenu # Kernel hacking
-
-config INT_POW_TEST
- tristate "Integer exponentiation (int_pow) test" if !KUNIT_ALL_TESTS
- depends on KUNIT
- default KUNIT_ALL_TESTS
- help
- This option enables the KUnit test suite for the int_pow function,
- which performs integer exponentiation. The test suite is designed to
- verify that the implementation of int_pow correctly computes the power
- of a given base raised to a given exponent.
-
- Enabling this option will include tests that check various scenarios
- and edge cases to ensure the accuracy and reliability of the exponentiation
- function.
-
- If unsure, say N
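[Editor's note] To make the TEST_KALLSYMS sizing above concrete, the following stand-alone C sketch (not part of the patch) works out how many exported symbols each test module ends up with under the defaults documented above (TEST_KALLSYMS_NUMSYMS=100, TEST_KALLSYMS_SCALE_FACTOR=8); it only mirrors the arithmetic in the help text.

#include <stdio.h>

/* Mirrors the arithmetic in the TEST_KALLSYMS help text: A exports
 * NUMSYMS symbols, C exports SCALE_FACTOR * NUMSYMS, D exports twice
 * as many as C, and B merely consumes one of A's symbols.
 */
int main(void)
{
	unsigned int numsyms = 100;	/* TEST_KALLSYMS_NUMSYMS default */
	unsigned int scale = 8;		/* TEST_KALLSYMS_SCALE_FACTOR default */
	unsigned int a = numsyms;		/* module A:  100 symbols */
	unsigned int c = scale * numsyms;	/* module C:  800 symbols */
	unsigned int d = 2 * c;			/* module D: 1600 symbols */

	printf("A=%u C=%u D=%u (B uses one of A's symbols)\n", a, c, d);
	return 0;
}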
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 98016e137b7f..f82889a830fa 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -195,13 +195,6 @@ config KASAN_KUNIT_TEST
For more information on KUnit and unit tests in general, please refer
to the KUnit documentation in Documentation/dev-tools/kunit/.
-config KASAN_MODULE_TEST
- tristate "KUnit-incompatible tests of KASAN bug detection capabilities"
- depends on m && KASAN && !KASAN_HW_TAGS
- help
- A part of the KASAN test suite that is not integrated with KUnit.
- Incompatible with Hardware Tag-Based KASAN.
-
config KASAN_EXTRA_INFO
bool "Record and report more information"
depends on KASAN
diff --git a/lib/Makefile b/lib/Makefile
index b393dd8151e2..a8155c972f02 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -35,10 +35,12 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
nmi_backtrace.o win_minmax.o memcat_p.o \
- buildid.o objpool.o union_find.o iomem_copy.o
+ buildid.o objpool.o iomem_copy.o
+lib-$(CONFIG_UNION_FIND) += union_find.o
lib-$(CONFIG_PRINTK) += dump_stack.o
lib-$(CONFIG_SMP) += cpumask.o
+lib-$(CONFIG_MIN_HEAP) += min_heap.o
lib-y += kobject.o klist.o
obj-y += lockref.o
@@ -96,6 +98,7 @@ obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
obj-$(CONFIG_TEST_MAPLE_TREE) += test_maple_tree.o
obj-$(CONFIG_TEST_PARMAN) += test_parman.o
obj-$(CONFIG_TEST_KMOD) += test_kmod.o
+obj-$(CONFIG_TEST_RUNTIME) += tests/
obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o
@@ -371,6 +374,7 @@ obj-$(CONFIG_PLDMFW) += pldmfw/
CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN)
obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
obj-$(CONFIG_CHECKSUM_KUNIT) += checksum_kunit.o
+obj-$(CONFIG_UTIL_MACROS_KUNIT) += util_macros_kunit.o
obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
obj-$(CONFIG_HASHTABLE_KUNIT_TEST) += hashtable_test.o
obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
@@ -390,6 +394,7 @@ CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN)
obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o
obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o
obj-$(CONFIG_USERCOPY_KUNIT_TEST) += usercopy_kunit.o
+obj-$(CONFIG_CRC16_KUNIT_TEST) += crc16_kunit.o
obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 81e5f9a70f22..2414a7ee7ec7 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -1,12 +1,26 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/alloc_tag.h>
+#include <linux/execmem.h>
#include <linux/fs.h>
#include <linux/gfp.h>
+#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/page_ext.h>
#include <linux/proc_fs.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+
+#define ALLOCINFO_FILE_NAME "allocinfo"
+#define MODULE_ALLOC_TAG_VMAP_SIZE (100000UL * sizeof(struct alloc_tag))
+#define SECTION_START(NAME) (CODETAG_SECTION_START_PREFIX NAME)
+#define SECTION_STOP(NAME) (CODETAG_SECTION_STOP_PREFIX NAME)
+
+#ifdef CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT
+static bool mem_profiling_support = true;
+#else
+static bool mem_profiling_support;
+#endif
static struct codetag_type *alloc_tag_cttype;
@@ -15,6 +29,11 @@ EXPORT_SYMBOL(_shared_alloc_tag);
DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
mem_alloc_profiling_key);
+DEFINE_STATIC_KEY_FALSE(mem_profiling_compressed);
+
+struct alloc_tag_kernel_section kernel_tags = { NULL, 0 };
+unsigned long alloc_tag_ref_mask;
+int alloc_tag_ref_offs;
struct allocinfo_private {
struct codetag_iterator iter;
@@ -144,44 +163,450 @@ size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sl
return nr;
}
+void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
+{
+ int i;
+ struct alloc_tag *tag;
+ unsigned int nr_pages = 1 << new_order;
+
+ if (!mem_alloc_profiling_enabled())
+ return;
+
+ tag = pgalloc_tag_get(&folio->page);
+ if (!tag)
+ return;
+
+ for (i = nr_pages; i < (1 << old_order); i += nr_pages) {
+ union pgtag_ref_handle handle;
+ union codetag_ref ref;
+
+ if (get_page_tag_ref(folio_page(folio, i), &ref, &handle)) {
+ /* Set new reference to point to the original tag */
+ alloc_tag_ref_set(&ref, tag);
+ update_page_tag_ref(handle, &ref);
+ put_page_tag_ref(handle);
+ }
+ }
+}
+
+void pgalloc_tag_copy(struct folio *new, struct folio *old)
+{
+ union pgtag_ref_handle handle;
+ union codetag_ref ref;
+ struct alloc_tag *tag;
+
+ tag = pgalloc_tag_get(&old->page);
+ if (!tag)
+ return;
+
+ if (!get_page_tag_ref(&new->page, &ref, &handle))
+ return;
+
+ /* Clear the old ref to the original allocation tag. */
+ clear_page_tag_ref(&old->page);
+ /* Decrement the counters of the tag on get_new_folio. */
+ alloc_tag_sub(&ref, folio_size(new));
+ __alloc_tag_ref_set(&ref, tag);
+ update_page_tag_ref(handle, &ref);
+ put_page_tag_ref(handle);
+}
+
+static void shutdown_mem_profiling(bool remove_file)
+{
+ if (mem_alloc_profiling_enabled())
+ static_branch_disable(&mem_alloc_profiling_key);
+
+ if (!mem_profiling_support)
+ return;
+
+ if (remove_file)
+ remove_proc_entry(ALLOCINFO_FILE_NAME, NULL);
+ mem_profiling_support = false;
+}
+
static void __init procfs_init(void)
{
- proc_create_seq("allocinfo", 0400, NULL, &allocinfo_seq_op);
+ if (!mem_profiling_support)
+ return;
+
+ if (!proc_create_seq(ALLOCINFO_FILE_NAME, 0400, NULL, &allocinfo_seq_op)) {
+ pr_err("Failed to create %s file\n", ALLOCINFO_FILE_NAME);
+ shutdown_mem_profiling(false);
+ }
+}
+
+void __init alloc_tag_sec_init(void)
+{
+ struct alloc_tag *last_codetag;
+
+ if (!mem_profiling_support)
+ return;
+
+ if (!static_key_enabled(&mem_profiling_compressed))
+ return;
+
+ kernel_tags.first_tag = (struct alloc_tag *)kallsyms_lookup_name(
+ SECTION_START(ALLOC_TAG_SECTION_NAME));
+ last_codetag = (struct alloc_tag *)kallsyms_lookup_name(
+ SECTION_STOP(ALLOC_TAG_SECTION_NAME));
+ kernel_tags.count = last_codetag - kernel_tags.first_tag;
+
+ /* Check if kernel tags fit into page flags */
+ if (kernel_tags.count > (1UL << NR_UNUSED_PAGEFLAG_BITS)) {
+ shutdown_mem_profiling(false); /* allocinfo file does not exist yet */
+ pr_err("%lu allocation tags cannot be references using %d available page flag bits. Memory allocation profiling is disabled!\n",
+ kernel_tags.count, NR_UNUSED_PAGEFLAG_BITS);
+ return;
+ }
+
+ alloc_tag_ref_offs = (LRU_REFS_PGOFF - NR_UNUSED_PAGEFLAG_BITS);
+ alloc_tag_ref_mask = ((1UL << NR_UNUSED_PAGEFLAG_BITS) - 1);
+ pr_debug("Memory allocation profiling compression is using %d page flag bits!\n",
+ NR_UNUSED_PAGEFLAG_BITS);
+}
+
+#ifdef CONFIG_MODULES
+
+static struct maple_tree mod_area_mt = MTREE_INIT(mod_area_mt, MT_FLAGS_ALLOC_RANGE);
+static struct vm_struct *vm_module_tags;
+/* A dummy object used to indicate an unloaded module */
+static struct module unloaded_mod;
+/* A dummy object used to indicate a module prepended area */
+static struct module prepend_mod;
+
+struct alloc_tag_module_section module_tags;
+
+static inline unsigned long alloc_tag_align(unsigned long val)
+{
+ if (!static_key_enabled(&mem_profiling_compressed)) {
+ /* No alignment requirements when we are not indexing the tags */
+ return val;
+ }
+
+ if (val % sizeof(struct alloc_tag) == 0)
+ return val;
+ return ((val / sizeof(struct alloc_tag)) + 1) * sizeof(struct alloc_tag);
}
-static bool alloc_tag_module_unload(struct codetag_type *cttype,
- struct codetag_module *cmod)
+static bool ensure_alignment(unsigned long align, unsigned int *prepend)
{
- struct codetag_iterator iter = codetag_get_ct_iter(cttype);
- struct alloc_tag_counters counter;
- bool module_unused = true;
+ if (!static_key_enabled(&mem_profiling_compressed)) {
+ /* No alignment requirements when we are not indexing the tags */
+ return true;
+ }
+
+ /*
+ * If alloc_tag size is not a multiple of required alignment, tag
+ * indexing does not work.
+ */
+ if (!IS_ALIGNED(sizeof(struct alloc_tag), align))
+ return false;
+
+ /* Ensure prepend consumes multiple of alloc_tag-sized blocks */
+ if (*prepend)
+ *prepend = alloc_tag_align(*prepend);
+
+ return true;
+}
+
+static inline bool tags_addressable(void)
+{
+ unsigned long tag_idx_count;
+
+ if (!static_key_enabled(&mem_profiling_compressed))
+ return true; /* with page_ext tags are always addressable */
+
+ tag_idx_count = CODETAG_ID_FIRST + kernel_tags.count +
+ module_tags.size / sizeof(struct alloc_tag);
+
+ return tag_idx_count < (1UL << NR_UNUSED_PAGEFLAG_BITS);
+}
+
+static bool needs_section_mem(struct module *mod, unsigned long size)
+{
+ if (!mem_profiling_support)
+ return false;
+
+ return size >= sizeof(struct alloc_tag);
+}
+
+static struct alloc_tag *find_used_tag(struct alloc_tag *from, struct alloc_tag *to)
+{
+ while (from <= to) {
+ struct alloc_tag_counters counter;
+
+ counter = alloc_tag_read(from);
+ if (counter.bytes)
+ return from;
+ from++;
+ }
+
+ return NULL;
+}
+
+/* Called with mod_area_mt locked */
+static void clean_unused_module_areas_locked(void)
+{
+ MA_STATE(mas, &mod_area_mt, 0, module_tags.size);
+ struct module *val;
+
+ mas_for_each(&mas, val, module_tags.size) {
+ if (val != &unloaded_mod)
+ continue;
+
+ /* Release area if all tags are unused */
+ if (!find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index),
+ (struct alloc_tag *)(module_tags.start_addr + mas.last)))
+ mas_erase(&mas);
+ }
+}
+
+/* Called with mod_area_mt locked */
+static bool find_aligned_area(struct ma_state *mas, unsigned long section_size,
+ unsigned long size, unsigned int prepend, unsigned long align)
+{
+ bool cleanup_done = false;
+
+repeat:
+ /* Try finding exact size and hope the start is aligned */
+ if (!mas_empty_area(mas, 0, section_size - 1, prepend + size)) {
+ if (IS_ALIGNED(mas->index + prepend, align))
+ return true;
+
+ /* Try finding larger area to align later */
+ mas_reset(mas);
+ if (!mas_empty_area(mas, 0, section_size - 1,
+ size + prepend + align - 1))
+ return true;
+ }
+
+ /* No free area, try to clean up stale data and repeat the search once */
+ if (!cleanup_done) {
+ clean_unused_module_areas_locked();
+ cleanup_done = true;
+ mas_reset(mas);
+ goto repeat;
+ }
+
+ return false;
+}
+
+static int vm_module_tags_populate(void)
+{
+ unsigned long phys_size = vm_module_tags->nr_pages << PAGE_SHIFT;
+
+ if (phys_size < module_tags.size) {
+ struct page **next_page = vm_module_tags->pages + vm_module_tags->nr_pages;
+ unsigned long addr = module_tags.start_addr + phys_size;
+ unsigned long more_pages;
+ unsigned long nr;
+
+ more_pages = ALIGN(module_tags.size - phys_size, PAGE_SIZE) >> PAGE_SHIFT;
+ nr = alloc_pages_bulk_array_node(GFP_KERNEL | __GFP_NOWARN,
+ NUMA_NO_NODE, more_pages, next_page);
+ if (nr < more_pages ||
+ vmap_pages_range(addr, addr + (nr << PAGE_SHIFT), PAGE_KERNEL,
+ next_page, PAGE_SHIFT) < 0) {
+ /* Clean up and error out */
+ for (int i = 0; i < nr; i++)
+ __free_page(next_page[i]);
+ return -ENOMEM;
+ }
+ vm_module_tags->nr_pages += nr;
+ }
+
+ return 0;
+}
+
+static void *reserve_module_tags(struct module *mod, unsigned long size,
+ unsigned int prepend, unsigned long align)
+{
+ unsigned long section_size = module_tags.end_addr - module_tags.start_addr;
+ MA_STATE(mas, &mod_area_mt, 0, section_size - 1);
+ unsigned long offset;
+ void *ret = NULL;
+
+ /* If no tags return error */
+ if (size < sizeof(struct alloc_tag))
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * align is always power of 2, so we can use IS_ALIGNED and ALIGN.
+ * align 0 or 1 means no alignment, to simplify set to 1.
+ */
+ if (!align)
+ align = 1;
+
+ if (!ensure_alignment(align, &prepend)) {
+ shutdown_mem_profiling(true);
+ pr_err("%s: alignment %lu is incompatible with allocation tag indexing. Memory allocation profiling is disabled!\n",
+ mod->name, align);
+ return ERR_PTR(-EINVAL);
+ }
+
+ mas_lock(&mas);
+ if (!find_aligned_area(&mas, section_size, size, prepend, align)) {
+ ret = ERR_PTR(-ENOMEM);
+ goto unlock;
+ }
+
+ /* Mark found area as reserved */
+ offset = mas.index;
+ offset += prepend;
+ offset = ALIGN(offset, align);
+ if (offset != mas.index) {
+ unsigned long pad_start = mas.index;
+
+ mas.last = offset - 1;
+ mas_store(&mas, &prepend_mod);
+ if (mas_is_err(&mas)) {
+ ret = ERR_PTR(xa_err(mas.node));
+ goto unlock;
+ }
+ mas.index = offset;
+ mas.last = offset + size - 1;
+ mas_store(&mas, mod);
+ if (mas_is_err(&mas)) {
+ mas.index = pad_start;
+ mas_erase(&mas);
+ ret = ERR_PTR(xa_err(mas.node));
+ }
+ } else {
+ mas.last = offset + size - 1;
+ mas_store(&mas, mod);
+ if (mas_is_err(&mas))
+ ret = ERR_PTR(xa_err(mas.node));
+ }
+unlock:
+ mas_unlock(&mas);
+
+ if (IS_ERR(ret))
+ return ret;
+
+ if (module_tags.size < offset + size) {
+ int grow_res;
+
+ module_tags.size = offset + size;
+ if (mem_alloc_profiling_enabled() && !tags_addressable()) {
+ shutdown_mem_profiling(true);
+ pr_warn("With module %s there are too many tags to fit in %d page flag bits. Memory allocation profiling is disabled!\n",
+ mod->name, NR_UNUSED_PAGEFLAG_BITS);
+ }
+
+ grow_res = vm_module_tags_populate();
+ if (grow_res) {
+ shutdown_mem_profiling(true);
+ pr_err("Failed to allocate memory for allocation tags in the module %s. Memory allocation profiling is disabled!\n",
+ mod->name);
+ return ERR_PTR(grow_res);
+ }
+ }
+
+ return (struct alloc_tag *)(module_tags.start_addr + offset);
+}
+
+static void release_module_tags(struct module *mod, bool used)
+{
+ MA_STATE(mas, &mod_area_mt, module_tags.size, module_tags.size);
struct alloc_tag *tag;
- struct codetag *ct;
+ struct module *val;
+
+ mas_lock(&mas);
+ mas_for_each_rev(&mas, val, 0)
+ if (val == mod)
+ break;
+
+ if (!val) /* module not found */
+ goto out;
+
+ if (!used)
+ goto release_area;
+
+ /* Find out if the area is used */
+ tag = find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index),
+ (struct alloc_tag *)(module_tags.start_addr + mas.last));
+ if (tag) {
+ struct alloc_tag_counters counter = alloc_tag_read(tag);
+
+ pr_info("%s:%u module %s func:%s has %llu allocated at module unload\n",
+ tag->ct.filename, tag->ct.lineno, tag->ct.modname,
+ tag->ct.function, counter.bytes);
+ } else {
+ used = false;
+ }
+release_area:
+ mas_store(&mas, used ? &unloaded_mod : NULL);
+ val = mas_prev_range(&mas, 0);
+ if (val == &prepend_mod)
+ mas_store(&mas, NULL);
+out:
+ mas_unlock(&mas);
+}
- for (ct = codetag_next_ct(&iter); ct; ct = codetag_next_ct(&iter)) {
- if (iter.cmod != cmod)
+static void replace_module(struct module *mod, struct module *new_mod)
+{
+ MA_STATE(mas, &mod_area_mt, 0, module_tags.size);
+ struct module *val;
+
+ mas_lock(&mas);
+ mas_for_each(&mas, val, module_tags.size) {
+ if (val != mod)
continue;
- tag = ct_to_alloc_tag(ct);
- counter = alloc_tag_read(tag);
+ mas_store_gfp(&mas, new_mod, GFP_KERNEL);
+ break;
+ }
+ mas_unlock(&mas);
+}
- if (WARN(counter.bytes,
- "%s:%u module %s func:%s has %llu allocated at module unload",
- ct->filename, ct->lineno, ct->modname, ct->function, counter.bytes))
- module_unused = false;
+static int __init alloc_mod_tags_mem(void)
+{
+ /* Map space to copy allocation tags */
+ vm_module_tags = execmem_vmap(MODULE_ALLOC_TAG_VMAP_SIZE);
+ if (!vm_module_tags) {
+ pr_err("Failed to map %lu bytes for module allocation tags\n",
+ MODULE_ALLOC_TAG_VMAP_SIZE);
+ module_tags.start_addr = 0;
+ return -ENOMEM;
}
- return module_unused;
+ vm_module_tags->pages = kmalloc_array(get_vm_area_size(vm_module_tags) >> PAGE_SHIFT,
+ sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
+ if (!vm_module_tags->pages) {
+ free_vm_area(vm_module_tags);
+ return -ENOMEM;
+ }
+
+ module_tags.start_addr = (unsigned long)vm_module_tags->addr;
+ module_tags.end_addr = module_tags.start_addr + MODULE_ALLOC_TAG_VMAP_SIZE;
+ /* Ensure the base is alloc_tag aligned when required for indexing */
+ module_tags.start_addr = alloc_tag_align(module_tags.start_addr);
+
+ return 0;
+}
+
+static void __init free_mod_tags_mem(void)
+{
+ int i;
+
+ module_tags.start_addr = 0;
+ for (i = 0; i < vm_module_tags->nr_pages; i++)
+ __free_page(vm_module_tags->pages[i]);
+ kfree(vm_module_tags->pages);
+ free_vm_area(vm_module_tags);
}
-#ifdef CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT
-static bool mem_profiling_support __meminitdata = true;
-#else
-static bool mem_profiling_support __meminitdata;
-#endif
+#else /* CONFIG_MODULES */
+
+static inline int alloc_mod_tags_mem(void) { return 0; }
+static inline void free_mod_tags_mem(void) {}
+
+#endif /* CONFIG_MODULES */
+/* See: Documentation/mm/allocation-profiling.rst */
static int __init setup_early_mem_profiling(char *str)
{
+ bool compressed = false;
bool enable;
if (!str || !str[0])
@@ -190,22 +615,37 @@ static int __init setup_early_mem_profiling(char *str)
if (!strncmp(str, "never", 5)) {
enable = false;
mem_profiling_support = false;
+ pr_info("Memory allocation profiling is disabled!\n");
} else {
- int res;
+ char *token = strsep(&str, ",");
- res = kstrtobool(str, &enable);
- if (res)
- return res;
+ if (kstrtobool(token, &enable))
+ return -EINVAL;
+ if (str) {
+
+ if (strcmp(str, "compressed"))
+ return -EINVAL;
+
+ compressed = true;
+ }
mem_profiling_support = true;
+ pr_info("Memory allocation profiling is enabled %s compression and is turned %s!\n",
+ compressed ? "with" : "without", enable ? "on" : "off");
}
- if (enable != static_key_enabled(&mem_alloc_profiling_key)) {
+ if (enable != mem_alloc_profiling_enabled()) {
if (enable)
static_branch_enable(&mem_alloc_profiling_key);
else
static_branch_disable(&mem_alloc_profiling_key);
}
+ if (compressed != static_key_enabled(&mem_profiling_compressed)) {
+ if (compressed)
+ static_branch_enable(&mem_profiling_compressed);
+ else
+ static_branch_disable(&mem_profiling_compressed);
+ }
return 0;
}
@@ -213,6 +653,9 @@ early_param("sysctl.vm.mem_profiling", setup_early_mem_profiling);
static __init bool need_page_alloc_tagging(void)
{
+ if (static_key_enabled(&mem_profiling_compressed))
+ return false;
+
return mem_profiling_support;
}
@@ -255,14 +698,26 @@ static inline void sysctl_init(void) {}
static int __init alloc_tag_init(void)
{
const struct codetag_type_desc desc = {
- .section = "alloc_tags",
- .tag_size = sizeof(struct alloc_tag),
- .module_unload = alloc_tag_module_unload,
+ .section = ALLOC_TAG_SECTION_NAME,
+ .tag_size = sizeof(struct alloc_tag),
+#ifdef CONFIG_MODULES
+ .needs_section_mem = needs_section_mem,
+ .alloc_section_mem = reserve_module_tags,
+ .free_section_mem = release_module_tags,
+ .module_replaced = replace_module,
+#endif
};
+ int res;
+
+ res = alloc_mod_tags_mem();
+ if (res)
+ return res;
alloc_tag_cttype = codetag_register_type(&desc);
- if (IS_ERR(alloc_tag_cttype))
+ if (IS_ERR(alloc_tag_cttype)) {
+ free_mod_tags_mem();
return PTR_ERR(alloc_tag_cttype);
+ }
sysctl_init();
procfs_init();
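[Editor's note] setup_early_mem_profiling() above splits the sysctl.vm.mem_profiling= early parameter into an optional boolean token and an optional ",compressed" suffix. The following stand-alone user-space C sketch (not part of the patch) mimics that parsing with libc strsep(); parse_bool() is a simplified stand-in for the kernel's kstrtobool().

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static int parse_bool(const char *s, bool *val)
{
	if (!strcmp(s, "1") || !strcmp(s, "y") || !strcmp(s, "on"))
		*val = true;
	else if (!strcmp(s, "0") || !strcmp(s, "n") || !strcmp(s, "off"))
		*val = false;
	else
		return -1;
	return 0;
}

static int parse(char *str, bool *enable, bool *compressed)
{
	char *token;

	*compressed = false;
	if (!strncmp(str, "never", 5)) {	/* "never": fully disabled */
		*enable = false;
		return 0;
	}
	token = strsep(&str, ",");		/* boolean part */
	if (parse_bool(token, enable))
		return -1;
	if (str) {				/* optional ",compressed" suffix */
		if (strcmp(str, "compressed"))
			return -1;
		*compressed = true;
	}
	return 0;
}

int main(void)
{
	char arg[] = "1,compressed";
	bool enable, compressed;

	if (!parse(arg, &enable, &compressed))
		printf("enable=%d compressed=%d\n", enable, compressed);
	return 0;
}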
diff --git a/lib/codetag.c b/lib/codetag.c
index d1fbbb7c2ec3..42aadd6c1454 100644
--- a/lib/codetag.c
+++ b/lib/codetag.c
@@ -149,8 +149,8 @@ static struct codetag_range get_section_range(struct module *mod,
const char *section)
{
return (struct codetag_range) {
- get_symbol(mod, "__start_", section),
- get_symbol(mod, "__stop_", section),
+ get_symbol(mod, CODETAG_SECTION_START_PREFIX, section),
+ get_symbol(mod, CODETAG_SECTION_STOP_PREFIX, section),
};
}
@@ -207,6 +207,94 @@ static int codetag_module_init(struct codetag_type *cttype, struct module *mod)
}
#ifdef CONFIG_MODULES
+#define CODETAG_SECTION_PREFIX ".codetag."
+
+/* Some codetag types need a separate module section */
+bool codetag_needs_module_section(struct module *mod, const char *name,
+ unsigned long size)
+{
+ const char *type_name;
+ struct codetag_type *cttype;
+ bool ret = false;
+
+ if (strncmp(name, CODETAG_SECTION_PREFIX, strlen(CODETAG_SECTION_PREFIX)))
+ return false;
+
+ type_name = name + strlen(CODETAG_SECTION_PREFIX);
+ mutex_lock(&codetag_lock);
+ list_for_each_entry(cttype, &codetag_types, link) {
+ if (strcmp(type_name, cttype->desc.section) == 0) {
+ if (!cttype->desc.needs_section_mem)
+ break;
+
+ down_write(&cttype->mod_lock);
+ ret = cttype->desc.needs_section_mem(mod, size);
+ up_write(&cttype->mod_lock);
+ break;
+ }
+ }
+ mutex_unlock(&codetag_lock);
+
+ return ret;
+}
+
+void *codetag_alloc_module_section(struct module *mod, const char *name,
+ unsigned long size, unsigned int prepend,
+ unsigned long align)
+{
+ const char *type_name = name + strlen(CODETAG_SECTION_PREFIX);
+ struct codetag_type *cttype;
+ void *ret = ERR_PTR(-EINVAL);
+
+ mutex_lock(&codetag_lock);
+ list_for_each_entry(cttype, &codetag_types, link) {
+ if (strcmp(type_name, cttype->desc.section) == 0) {
+ if (WARN_ON(!cttype->desc.alloc_section_mem))
+ break;
+
+ down_write(&cttype->mod_lock);
+ ret = cttype->desc.alloc_section_mem(mod, size, prepend, align);
+ up_write(&cttype->mod_lock);
+ break;
+ }
+ }
+ mutex_unlock(&codetag_lock);
+
+ return ret;
+}
+
+void codetag_free_module_sections(struct module *mod)
+{
+ struct codetag_type *cttype;
+
+ mutex_lock(&codetag_lock);
+ list_for_each_entry(cttype, &codetag_types, link) {
+ if (!cttype->desc.free_section_mem)
+ continue;
+
+ down_write(&cttype->mod_lock);
+ cttype->desc.free_section_mem(mod, false);
+ up_write(&cttype->mod_lock);
+ }
+ mutex_unlock(&codetag_lock);
+}
+
+void codetag_module_replaced(struct module *mod, struct module *new_mod)
+{
+ struct codetag_type *cttype;
+
+ mutex_lock(&codetag_lock);
+ list_for_each_entry(cttype, &codetag_types, link) {
+ if (!cttype->desc.module_replaced)
+ continue;
+
+ down_write(&cttype->mod_lock);
+ cttype->desc.module_replaced(mod, new_mod);
+ up_write(&cttype->mod_lock);
+ }
+ mutex_unlock(&codetag_lock);
+}
+
void codetag_load_module(struct module *mod)
{
struct codetag_type *cttype;
@@ -220,13 +308,12 @@ void codetag_load_module(struct module *mod)
mutex_unlock(&codetag_lock);
}
-bool codetag_unload_module(struct module *mod)
+void codetag_unload_module(struct module *mod)
{
struct codetag_type *cttype;
- bool unload_ok = true;
if (!mod)
- return true;
+ return;
/* await any module's kfree_rcu() operations to complete */
kvfree_rcu_barrier();
@@ -246,18 +333,17 @@ bool codetag_unload_module(struct module *mod)
}
if (found) {
if (cttype->desc.module_unload)
- if (!cttype->desc.module_unload(cttype, cmod))
- unload_ok = false;
+ cttype->desc.module_unload(cttype, cmod);
cttype->count -= range_size(cttype, &cmod->range);
idr_remove(&cttype->mod_idr, mod_id);
kfree(cmod);
}
up_write(&cttype->mod_lock);
+ if (found && cttype->desc.free_section_mem)
+ cttype->desc.free_section_mem(mod, true);
}
mutex_unlock(&codetag_lock);
-
- return unload_ok;
}
#endif /* CONFIG_MODULES */
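[Editor's note] The new module-section hooks above key off ELF sections whose names start with ".codetag."; everything after the prefix names the codetag type (for example "alloc_tags", the literal this patch replaces with ALLOC_TAG_SECTION_NAME). Below is a tiny stand-alone C sketch of that prefix match; it is not part of the patch and the helper name is made up.

#include <stdio.h>
#include <string.h>

#define CODETAG_SECTION_PREFIX ".codetag."

/* Return the codetag type name if @name is a codetag section, else NULL. */
static const char *codetag_type_of_section(const char *name)
{
	if (strncmp(name, CODETAG_SECTION_PREFIX, strlen(CODETAG_SECTION_PREFIX)))
		return NULL;
	return name + strlen(CODETAG_SECTION_PREFIX);
}

int main(void)
{
	printf("%s\n", codetag_type_of_section(".codetag.alloc_tags"));	/* "alloc_tags" */
	printf("%p\n", (void *)codetag_type_of_section(".text"));	/* NULL: not a codetag section */
	return 0;
}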
diff --git a/lib/crc16_kunit.c b/lib/crc16_kunit.c
new file mode 100644
index 000000000000..0918c98a96d2
--- /dev/null
+++ b/lib/crc16_kunit.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit tests for CRC16.
+ *
+ * Copyright (C) 2024, LKCAMP
+ * Author: Vinicius Peixoto <vpeixoto@lkcamp.dev>
+ * Author: Fabricio Gasperin <fgasperin@lkcamp.dev>
+ * Author: Enzo Bertoloti <ebertoloti@lkcamp.dev>
+ */
+#include <kunit/test.h>
+#include <linux/crc16.h>
+#include <linux/prandom.h>
+
+#define CRC16_KUNIT_DATA_SIZE 4096
+#define CRC16_KUNIT_TEST_SIZE 100
+#define CRC16_KUNIT_SEED 0x12345678
+
+/**
+ * struct crc16_test - CRC16 test data
+ * @crc: initial input value to CRC16
+ * @start: Start index within the data buffer
+ * @length: Length of the data
+ */
+static struct crc16_test {
+ u16 crc;
+ u16 start;
+ u16 length;
+} tests[CRC16_KUNIT_TEST_SIZE];
+
+u8 data[CRC16_KUNIT_DATA_SIZE];
+
+
+/* Naive implementation of CRC16 for validation purposes */
+static inline u16 _crc16_naive_byte(u16 crc, u8 data)
+{
+ u8 i = 0;
+
+ crc ^= (u16) data;
+ for (i = 0; i < 8; i++) {
+ if (crc & 0x01)
+ crc = (crc >> 1) ^ 0xa001;
+ else
+ crc = crc >> 1;
+ }
+
+ return crc;
+}
+
+
+static inline u16 _crc16_naive(u16 crc, u8 *buffer, size_t len)
+{
+ while (len--)
+ crc = _crc16_naive_byte(crc, *buffer++);
+ return crc;
+}
+
+
+/* Small helper for generating pseudorandom 16-bit data */
+static inline u16 _rand16(void)
+{
+ static u32 rand = CRC16_KUNIT_SEED;
+
+ rand = next_pseudo_random32(rand);
+ return rand & 0xFFFF;
+}
+
+
+static int crc16_init_test_data(struct kunit_suite *suite)
+{
+ size_t i;
+
+ /* Fill the data buffer with random bytes */
+ for (i = 0; i < CRC16_KUNIT_DATA_SIZE; i++)
+ data[i] = _rand16() & 0xFF;
+
+ /* Generate random test data while ensuring the random
+ * start + length values won't overflow the 4096-byte
+ * buffer (0x7FF * 2 = 0xFFE < 0x1000)
+ */
+ for (size_t i = 0; i < CRC16_KUNIT_TEST_SIZE; i++) {
+ tests[i].crc = _rand16();
+ tests[i].start = _rand16() & 0x7FF;
+ tests[i].length = _rand16() & 0x7FF;
+ }
+
+ return 0;
+}
+
+static void crc16_test_empty(struct kunit *test)
+{
+ u16 crc;
+
+ /* The result for empty data should be the same as the
+ * initial crc
+ */
+ crc = crc16(0x00, data, 0);
+ KUNIT_EXPECT_EQ(test, crc, 0);
+ crc = crc16(0xFF, data, 0);
+ KUNIT_EXPECT_EQ(test, crc, 0xFF);
+}
+
+static void crc16_test_correctness(struct kunit *test)
+{
+ size_t i;
+ u16 crc, crc_naive;
+
+ for (i = 0; i < CRC16_KUNIT_TEST_SIZE; i++) {
+ /* Compare results with the naive crc16 implementation */
+ crc = crc16(tests[i].crc, data + tests[i].start,
+ tests[i].length);
+ crc_naive = _crc16_naive(tests[i].crc, data + tests[i].start,
+ tests[i].length);
+ KUNIT_EXPECT_EQ(test, crc, crc_naive);
+ }
+}
+
+
+static void crc16_test_combine(struct kunit *test)
+{
+ size_t i, j;
+ u16 crc, crc_naive;
+
+ /* Make sure that combining two consecutive crc16 calculations
+ * yields the same result as calculating the crc16 for the whole thing
+ */
+ for (i = 0; i < CRC16_KUNIT_TEST_SIZE; i++) {
+ crc_naive = crc16(tests[i].crc, data + tests[i].start, tests[i].length);
+ for (j = 0; j < tests[i].length; j++) {
+ crc = crc16(tests[i].crc, data + tests[i].start, j);
+ crc = crc16(crc, data + tests[i].start + j, tests[i].length - j);
+ KUNIT_EXPECT_EQ(test, crc, crc_naive);
+ }
+ }
+}
+
+
+static struct kunit_case crc16_test_cases[] = {
+ KUNIT_CASE(crc16_test_empty),
+ KUNIT_CASE(crc16_test_combine),
+ KUNIT_CASE(crc16_test_correctness),
+ {},
+};
+
+static struct kunit_suite crc16_test_suite = {
+ .name = "crc16",
+ .test_cases = crc16_test_cases,
+ .suite_init = crc16_init_test_data,
+};
+kunit_test_suite(crc16_test_suite);
+
+MODULE_AUTHOR("Fabricio Gasperin <fgasperin@lkcamp.dev>");
+MODULE_AUTHOR("Vinicius Peixoto <vpeixoto@lkcamp.dev>");
+MODULE_AUTHOR("Enzo Bertoloti <ebertoloti@lkcamp.dev>");
+MODULE_DESCRIPTION("Unit tests for crc16");
+MODULE_LICENSE("GPL");
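[Editor's note] The naive reference implementation in crc16_kunit.c uses the reflected polynomial 0xa001 with a zero initial value, i.e. CRC-16/ARC. As an aside, a stand-alone user-space version of the same byte loop run over the conventional "123456789" test vector should yield the catalogued check value 0xBB3D; the snippet below is an illustration, not part of the patch.

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Same bit-at-a-time loop as _crc16_naive_byte() above (poly 0xa001). */
static uint16_t crc16_byte(uint16_t crc, uint8_t data)
{
	int i;

	crc ^= data;
	for (i = 0; i < 8; i++)
		crc = (crc & 1) ? (crc >> 1) ^ 0xa001 : crc >> 1;
	return crc;
}

int main(void)
{
	const char *msg = "123456789";
	uint16_t crc = 0;
	size_t i;

	for (i = 0; i < strlen(msg); i++)
		crc = crc16_byte(crc, (uint8_t)msg[i]);
	assert(crc == 0xBB3D);	/* CRC-16/ARC check value */
	return 0;
}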
diff --git a/lib/list-test.c b/lib/list-test.c
index e207c4c98d70..9135cdc1bb39 100644
--- a/lib/list-test.c
+++ b/lib/list-test.c
@@ -412,6 +412,8 @@ static void list_test_list_cut_position(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
i++;
}
+
+ KUNIT_EXPECT_EQ(test, i, 3);
}
static void list_test_list_cut_before(struct kunit *test)
@@ -440,6 +442,8 @@ static void list_test_list_cut_before(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
i++;
}
+
+ KUNIT_EXPECT_EQ(test, i, 3);
}
static void list_test_list_splice(struct kunit *test)
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 0fb59e92ca2d..8d3f623536fe 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -1,9 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/export.h>
-#include <linux/string.h>
#include <linux/list_sort.h>
#include <linux/list.h>
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 3619301dda2e..d0ae808f3a14 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -64,6 +64,21 @@
#define CREATE_TRACE_POINTS
#include <trace/events/maple_tree.h>
+/*
+ * Kernel pointer hashing renders much of the maple tree dump useless as tagged
+ * pointers get hashed to arbitrary values.
+ *
+ * If CONFIG_DEBUG_VM_MAPLE_TREE is set we are in a debug mode where it is
+ * permissible to bypass this. Otherwise remain cautious and retain the hashing.
+ *
+ * Userland doesn't know about %px so also use %p there.
+ */
+#if defined(__KERNEL__) && defined(CONFIG_DEBUG_VM_MAPLE_TREE)
+#define PTR_FMT "%px"
+#else
+#define PTR_FMT "%p"
+#endif
+
#define MA_ROOT_PARENT 1
/*
@@ -120,7 +135,6 @@ static const unsigned char mt_min_slots[] = {
#define MAPLE_BIG_NODE_GAPS (MAPLE_ARANGE64_SLOTS * 2 + 1)
struct maple_big_node {
- struct maple_pnode *parent;
unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
union {
struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
@@ -1193,19 +1207,17 @@ static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
reuse->request_count = 0;
reuse->node_count = 0;
- if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
- head->slot[head->node_count++] = reuse;
- head->total++;
- goto done;
- }
-
- reuse->total = 1;
- if ((head) && !((unsigned long)head & 0x1)) {
+ if (count) {
+ if (head->node_count < MAPLE_ALLOC_SLOTS) {
+ head->slot[head->node_count++] = reuse;
+ head->total++;
+ goto done;
+ }
reuse->slot[0] = head;
reuse->node_count = 1;
- reuse->total += head->total;
}
+ reuse->total = count + 1;
mas->alloc = reuse;
done:
if (requested > 1)
@@ -1251,11 +1263,11 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
mas->alloc = node;
node->total = ++allocated;
+ node->request_count = 0;
requested--;
}
node = mas->alloc;
- node->request_count = 0;
while (requested) {
max_req = MAPLE_ALLOC_SLOTS - node->node_count;
slots = (void **)&node->slot[node->node_count];
@@ -1271,7 +1283,10 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
node->node_count += count;
allocated += count;
- node = node->slot[0];
+ /* find a non-full node */
+ do {
+ node = node->slot[0];
+ } while (unlikely(node->node_count == MAPLE_ALLOC_SLOTS));
requested -= count;
}
mas->alloc->total = allocated;
@@ -1280,10 +1295,9 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
nomem_bulk:
/* Clean up potential freed allocations on bulk failure */
memset(slots, 0, max_req * sizeof(unsigned long));
+ mas->alloc->total = allocated;
nomem_one:
mas_set_alloc_req(mas, requested);
- if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
- mas->alloc->total = allocated;
mas_set_err(mas, -ENOMEM);
}
@@ -1943,14 +1957,13 @@ static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
for (; i < piv_end; i++, j++) {
b_node->pivot[j] = pivots[i];
if (unlikely(!b_node->pivot[j]))
- break;
+ goto complete;
if (unlikely(mas->max == b_node->pivot[j]))
goto complete;
}
- if (likely(i <= mas_end))
- b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);
+ b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);
complete:
b_node->b_end = ++j;
@@ -2139,9 +2152,7 @@ static inline bool mas_prev_sibling(struct ma_state *mas)
{
unsigned int p_slot = mte_parent_slot(mas->node);
- if (mte_is_root(mas->node))
- return false;
-
+ /* For root node, p_slot is set to 0 by mte_parent_slot(). */
if (!p_slot)
return false;
@@ -3159,10 +3170,7 @@ static inline void mast_fill_bnode(struct maple_subtree_state *mast,
bool cp = true;
unsigned char split;
- memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap));
- memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot));
- memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot));
- mast->bn->b_end = 0;
+ memset(mast->bn, 0, sizeof(struct maple_big_node));
if (mte_is_root(mas->node)) {
cp = false;
@@ -3400,7 +3408,7 @@ static noinline_for_kasan void mas_commit_b_node(struct ma_wr_state *wr_mas,
* @mas: The maple state
* @entry: The entry to store into the tree
*/
-static inline int mas_root_expand(struct ma_state *mas, void *entry)
+static inline void mas_root_expand(struct ma_state *mas, void *entry)
{
void *contents = mas_root_locked(mas);
enum maple_type type = maple_leaf_64;
@@ -3436,12 +3444,23 @@ static inline int mas_root_expand(struct ma_state *mas, void *entry)
ma_set_meta(node, maple_leaf_64, 0, slot);
/* swap the new root into the tree */
rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
- return slot;
+ return;
}
+/*
+ * mas_store_root() - Storing value into root.
+ * @mas: The maple state
+ * @entry: The entry to store.
+ *
+ * There is no root node now and we are storing a value into the root - this
+ * function either assigns the pointer or expands into a node.
+ */
static inline void mas_store_root(struct ma_state *mas, void *entry)
{
- if (likely((mas->last != 0) || (mas->index != 0)))
+ if (!entry) {
+ if (!mas->index)
+ rcu_assign_pointer(mas->tree->ma_root, NULL);
+ } else if (likely((mas->last != 0) || (mas->index != 0)))
mas_root_expand(mas, entry);
else if (((unsigned long) (entry) & 3) == 2)
mas_root_expand(mas, entry);
@@ -3662,7 +3681,9 @@ static inline void mas_new_root(struct ma_state *mas, void *entry)
void __rcu **slots;
unsigned long *pivots;
- if (!entry && !mas->index && mas->last == ULONG_MAX) {
+ WARN_ON_ONCE(mas->index || mas->last != ULONG_MAX);
+
+ if (!entry) {
mas->depth = 0;
mas_set_height(mas);
rcu_assign_pointer(mas->tree->ma_root, entry);
@@ -3889,7 +3910,8 @@ static inline void mas_wr_slot_store(struct ma_wr_state *wr_mas)
wr_mas->pivots[offset] = mas->index - 1;
mas->offset++; /* Keep mas accurate. */
}
- } else if (!mt_in_rcu(mas->tree)) {
+ } else {
+ WARN_ON_ONCE(mt_in_rcu(mas->tree));
/*
* Expand the range, only partially overwriting the previous and
* next ranges
@@ -3899,8 +3921,6 @@ static inline void mas_wr_slot_store(struct ma_wr_state *wr_mas)
wr_mas->pivots[offset] = mas->index - 1;
wr_mas->pivots[offset + 1] = mas->last;
mas->offset++; /* Keep mas accurate. */
- } else {
- return;
}
trace_ma_write(__func__, mas, 0, wr_mas->entry);
@@ -4181,75 +4201,53 @@ static inline int mas_prealloc_calc(struct ma_state *mas, void *entry)
}
/*
- * mas_wr_store_type() - Set the store type for a given
+ * mas_wr_store_type() - Determine the store type for a given
* store operation.
* @wr_mas: The maple write state
+ *
+ * Return: the type of store needed for the operation
*/
-static inline void mas_wr_store_type(struct ma_wr_state *wr_mas)
+static inline enum store_type mas_wr_store_type(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
unsigned char new_end;
- if (unlikely(mas_is_none(mas) || mas_is_ptr(mas))) {
- mas->store_type = wr_store_root;
- return;
- }
+ if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
+ return wr_store_root;
- if (unlikely(!mas_wr_walk(wr_mas))) {
- mas->store_type = wr_spanning_store;
- return;
- }
+ if (unlikely(!mas_wr_walk(wr_mas)))
+ return wr_spanning_store;
/* At this point, we are at the leaf node that needs to be altered. */
mas_wr_end_piv(wr_mas);
if (!wr_mas->entry)
mas_wr_extend_null(wr_mas);
- new_end = mas_wr_new_end(wr_mas);
- if ((wr_mas->r_min == mas->index) && (wr_mas->r_max == mas->last)) {
- mas->store_type = wr_exact_fit;
- return;
- }
+ if ((wr_mas->r_min == mas->index) && (wr_mas->r_max == mas->last))
+ return wr_exact_fit;
- if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
- mas->store_type = wr_new_root;
- return;
- }
+ if (unlikely(!mas->index && mas->last == ULONG_MAX))
+ return wr_new_root;
+ new_end = mas_wr_new_end(wr_mas);
/* Potential spanning rebalance collapsing a node */
if (new_end < mt_min_slots[wr_mas->type]) {
- if (!mte_is_root(mas->node) && !(mas->mas_flags & MA_STATE_BULK)) {
- mas->store_type = wr_rebalance;
- return;
- }
- mas->store_type = wr_node_store;
- return;
+ if (!mte_is_root(mas->node) && !(mas->mas_flags & MA_STATE_BULK))
+ return wr_rebalance;
+ return wr_node_store;
}
- if (new_end >= mt_slots[wr_mas->type]) {
- mas->store_type = wr_split_store;
- return;
- }
+ if (new_end >= mt_slots[wr_mas->type])
+ return wr_split_store;
- if (!mt_in_rcu(mas->tree) && (mas->offset == mas->end)) {
- mas->store_type = wr_append;
- return;
- }
+ if (!mt_in_rcu(mas->tree) && (mas->offset == mas->end))
+ return wr_append;
if ((new_end == mas->end) && (!mt_in_rcu(mas->tree) ||
- (wr_mas->offset_end - mas->offset == 1))) {
- mas->store_type = wr_slot_store;
- return;
- }
-
- if (mte_is_root(mas->node) || (new_end >= mt_min_slots[wr_mas->type]) ||
- (mas->mas_flags & MA_STATE_BULK)) {
- mas->store_type = wr_node_store;
- return;
- }
+ (wr_mas->offset_end - mas->offset == 1)))
+ return wr_slot_store;
- mas->store_type = wr_invalid;
- MAS_WARN_ON(mas, 1);
+ return wr_node_store;
}
/**
@@ -4264,7 +4262,7 @@ static inline void mas_wr_preallocate(struct ma_wr_state *wr_mas, void *entry)
int request;
mas_wr_prealloc_setup(wr_mas);
- mas_wr_store_type(wr_mas);
+ mas->store_type = mas_wr_store_type(wr_mas);
request = mas_prealloc_calc(mas, entry);
if (!request)
return;
@@ -5419,7 +5417,8 @@ void *mas_store(struct ma_state *mas, void *entry)
trace_ma_write(__func__, mas, 0, entry);
#ifdef CONFIG_DEBUG_MAPLE_TREE
if (MAS_WARN_ON(mas, mas->index > mas->last))
- pr_err("Error %lX > %lX %p\n", mas->index, mas->last, entry);
+ pr_err("Error %lX > %lX " PTR_FMT "\n", mas->index, mas->last,
+ entry);
if (mas->index > mas->last) {
mas_set_err(mas, -EINVAL);
@@ -5435,7 +5434,7 @@ void *mas_store(struct ma_state *mas, void *entry)
* overwrite multiple entries within a self-balancing B-Tree.
*/
mas_wr_prealloc_setup(&wr_mas);
- mas_wr_store_type(&wr_mas);
+ mas->store_type = mas_wr_store_type(&wr_mas);
if (mas->mas_flags & MA_STATE_PREALLOC) {
mas_wr_store_entry(&wr_mas);
MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
@@ -5538,7 +5537,7 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
int request;
mas_wr_prealloc_setup(&wr_mas);
- mas_wr_store_type(&wr_mas);
+ mas->store_type = mas_wr_store_type(&wr_mas);
request = mas_prealloc_calc(mas, entry);
if (!request)
return ret;
@@ -7124,14 +7123,14 @@ static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
mt_dump_range(min, max, depth, format);
if (xa_is_value(entry))
- pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
- xa_to_value(entry), entry);
+ pr_cont("value %ld (0x%lx) [" PTR_FMT "]\n", xa_to_value(entry),
+ xa_to_value(entry), entry);
else if (xa_is_zero(entry))
pr_cont("zero (%ld)\n", xa_to_internal(entry));
else if (mt_is_reserved(entry))
- pr_cont("UNKNOWN ENTRY (%p)\n", entry);
+ pr_cont("UNKNOWN ENTRY (" PTR_FMT ")\n", entry);
else
- pr_cont("%p\n", entry);
+ pr_cont(PTR_FMT "\n", entry);
}
static void mt_dump_range64(const struct maple_tree *mt, void *entry,
@@ -7147,13 +7146,13 @@ static void mt_dump_range64(const struct maple_tree *mt, void *entry,
for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) {
switch(format) {
case mt_dump_hex:
- pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
+ pr_cont(PTR_FMT " %lX ", node->slot[i], node->pivot[i]);
break;
case mt_dump_dec:
- pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
+ pr_cont(PTR_FMT " %lu ", node->slot[i], node->pivot[i]);
}
}
- pr_cont("%p\n", node->slot[i]);
+ pr_cont(PTR_FMT "\n", node->slot[i]);
for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
unsigned long last = max;
@@ -7175,11 +7174,11 @@ static void mt_dump_range64(const struct maple_tree *mt, void *entry,
if (last > max) {
switch(format) {
case mt_dump_hex:
- pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n",
+ pr_err("node " PTR_FMT " last (%lx) > max (%lx) at pivot %d!\n",
node, last, max, i);
break;
case mt_dump_dec:
- pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
+ pr_err("node " PTR_FMT " last (%lu) > max (%lu) at pivot %d!\n",
node, last, max, i);
}
}
@@ -7209,13 +7208,13 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++) {
switch (format) {
case mt_dump_hex:
- pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
+ pr_cont(PTR_FMT " %lX ", node->slot[i], node->pivot[i]);
break;
case mt_dump_dec:
- pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
+ pr_cont(PTR_FMT " %lu ", node->slot[i], node->pivot[i]);
}
}
- pr_cont("%p\n", node->slot[i]);
+ pr_cont(PTR_FMT "\n", node->slot[i]);
for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
unsigned long last = max;
@@ -7234,11 +7233,11 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
if (last > max) {
switch(format) {
case mt_dump_hex:
- pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n",
+ pr_err("node " PTR_FMT " last (%lx) > max (%lx) at pivot %d!\n",
node, last, max, i);
break;
case mt_dump_dec:
- pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
+ pr_err("node " PTR_FMT " last (%lu) > max (%lu) at pivot %d!\n",
node, last, max, i);
}
}
@@ -7256,8 +7255,8 @@ static void mt_dump_node(const struct maple_tree *mt, void *entry,
mt_dump_range(min, max, depth, format);
- pr_cont("node %p depth %d type %d parent %p", node, depth, type,
- node ? node->parent : NULL);
+ pr_cont("node " PTR_FMT " depth %d type %d parent " PTR_FMT, node,
+ depth, type, node ? node->parent : NULL);
switch (type) {
case maple_dense:
pr_cont("\n");
@@ -7285,12 +7284,14 @@ void mt_dump(const struct maple_tree *mt, enum mt_dump_format format)
{
void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));
- pr_info("maple_tree(%p) flags %X, height %u root %p\n",
+ pr_info("maple_tree(" PTR_FMT ") flags %X, height %u root " PTR_FMT "\n",
mt, mt->ma_flags, mt_height(mt), entry);
- if (!xa_is_node(entry))
- mt_dump_entry(entry, 0, 0, 0, format);
- else if (entry)
+ if (xa_is_node(entry))
mt_dump_node(mt, entry, 0, mt_node_max(entry), 0, format);
+ else if (entry)
+ mt_dump_entry(entry, 0, 0, 0, format);
+ else
+ pr_info("(empty)\n");
}
EXPORT_SYMBOL_GPL(mt_dump);
@@ -7337,7 +7338,7 @@ static void mas_validate_gaps(struct ma_state *mas)
MT_BUG_ON(mas->tree, !entry);
if (gap > p_end - p_start + 1) {
- pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n",
+ pr_err(PTR_FMT "[%u] %lu >= %lu - %lu + 1 (%lu)\n",
mas_mn(mas), i, gap, p_end, p_start,
p_end - p_start + 1);
MT_BUG_ON(mas->tree, gap > p_end - p_start + 1);
@@ -7357,19 +7358,19 @@ counted:
MT_BUG_ON(mas->tree, !gaps);
offset = ma_meta_gap(node);
if (offset > i) {
- pr_err("gap offset %p[%u] is invalid\n", node, offset);
+ pr_err("gap offset " PTR_FMT "[%u] is invalid\n", node, offset);
MT_BUG_ON(mas->tree, 1);
}
if (gaps[offset] != max_gap) {
- pr_err("gap %p[%u] is not the largest gap %lu\n",
+ pr_err("gap " PTR_FMT "[%u] is not the largest gap %lu\n",
node, offset, max_gap);
MT_BUG_ON(mas->tree, 1);
}
for (i++ ; i < mt_slot_count(mte); i++) {
if (gaps[i] != 0) {
- pr_err("gap %p[%u] beyond node limit != 0\n",
+ pr_err("gap " PTR_FMT "[%u] beyond node limit != 0\n",
node, i);
MT_BUG_ON(mas->tree, 1);
}
@@ -7383,7 +7384,7 @@ counted:
p_mn = mte_parent(mte);
MT_BUG_ON(mas->tree, max_gap > mas->max);
if (ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap) {
- pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
+ pr_err("gap " PTR_FMT "[%u] != %lu\n", p_mn, p_slot, max_gap);
mt_dump(mas->tree, mt_dump_hex);
MT_BUG_ON(mas->tree, 1);
}
@@ -7413,11 +7414,11 @@ static void mas_validate_parent_slot(struct ma_state *mas)
node = mas_slot(mas, slots, i);
if (i == p_slot) {
if (node != mas->node)
- pr_err("parent %p[%u] does not have %p\n",
+ pr_err("parent " PTR_FMT "[%u] does not have " PTR_FMT "\n",
parent, i, mas_mn(mas));
MT_BUG_ON(mas->tree, node != mas->node);
} else if (node == mas->node) {
- pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
+ pr_err("Invalid child " PTR_FMT " at parent " PTR_FMT "[%u] p_slot %u\n",
mas_mn(mas), parent, i, p_slot);
MT_BUG_ON(mas->tree, node == mas->node);
}
@@ -7439,20 +7440,20 @@ static void mas_validate_child_slot(struct ma_state *mas)
child = mas_slot(mas, slots, i);
if (!child) {
- pr_err("Non-leaf node lacks child at %p[%u]\n",
+ pr_err("Non-leaf node lacks child at " PTR_FMT "[%u]\n",
mas_mn(mas), i);
MT_BUG_ON(mas->tree, 1);
}
if (mte_parent_slot(child) != i) {
- pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
+ pr_err("Slot error at " PTR_FMT "[%u]: child " PTR_FMT " has pslot %u\n",
mas_mn(mas), i, mte_to_node(child),
mte_parent_slot(child));
MT_BUG_ON(mas->tree, 1);
}
if (mte_parent(child) != mte_to_node(mas->node)) {
- pr_err("child %p has parent %p not %p\n",
+ pr_err("child " PTR_FMT " has parent " PTR_FMT " not " PTR_FMT "\n",
mte_to_node(child), mte_parent(child),
mte_to_node(mas->node));
MT_BUG_ON(mas->tree, 1);
@@ -7482,24 +7483,24 @@ static void mas_validate_limits(struct ma_state *mas)
piv = mas_safe_pivot(mas, pivots, i, type);
if (!piv && (i != 0)) {
- pr_err("Missing node limit pivot at %p[%u]",
+ pr_err("Missing node limit pivot at " PTR_FMT "[%u]",
mas_mn(mas), i);
MAS_WARN_ON(mas, 1);
}
if (prev_piv > piv) {
- pr_err("%p[%u] piv %lu < prev_piv %lu\n",
+ pr_err(PTR_FMT "[%u] piv %lu < prev_piv %lu\n",
mas_mn(mas), i, piv, prev_piv);
MAS_WARN_ON(mas, piv < prev_piv);
}
if (piv < mas->min) {
- pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
+ pr_err(PTR_FMT "[%u] %lu < %lu\n", mas_mn(mas), i,
piv, mas->min);
MAS_WARN_ON(mas, piv < mas->min);
}
if (piv > mas->max) {
- pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
+ pr_err(PTR_FMT "[%u] %lu > %lu\n", mas_mn(mas), i,
piv, mas->max);
MAS_WARN_ON(mas, piv > mas->max);
}
@@ -7509,7 +7510,7 @@ static void mas_validate_limits(struct ma_state *mas)
}
if (mas_data_end(mas) != i) {
- pr_err("node%p: data_end %u != the last slot offset %u\n",
+ pr_err("node" PTR_FMT ": data_end %u != the last slot offset %u\n",
mas_mn(mas), mas_data_end(mas), i);
MT_BUG_ON(mas->tree, 1);
}
@@ -7518,8 +7519,8 @@ static void mas_validate_limits(struct ma_state *mas)
void *entry = mas_slot(mas, slots, i);
if (entry && (i != mt_slots[type] - 1)) {
- pr_err("%p[%u] should not have entry %p\n", mas_mn(mas),
- i, entry);
+ pr_err(PTR_FMT "[%u] should not have entry " PTR_FMT "\n",
+ mas_mn(mas), i, entry);
MT_BUG_ON(mas->tree, entry != NULL);
}
@@ -7529,7 +7530,7 @@ static void mas_validate_limits(struct ma_state *mas)
if (!piv)
continue;
- pr_err("%p[%u] should not have piv %lu\n",
+ pr_err(PTR_FMT "[%u] should not have piv %lu\n",
mas_mn(mas), i, piv);
MAS_WARN_ON(mas, i < mt_pivots[type] - 1);
}
@@ -7554,7 +7555,7 @@ static void mt_validate_nulls(struct maple_tree *mt)
do {
entry = mas_slot(&mas, slots, offset);
if (!last && !entry) {
- pr_err("Sequential nulls end at %p[%u]\n",
+ pr_err("Sequential nulls end at " PTR_FMT "[%u]\n",
mas_mn(&mas), offset);
}
MT_BUG_ON(mt, !last && !entry);
@@ -7596,7 +7597,8 @@ void mt_validate(struct maple_tree *mt)
end = mas_data_end(&mas);
if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) &&
(mas.max != ULONG_MAX))) {
- pr_err("Invalid size %u of %p\n", end, mas_mn(&mas));
+ pr_err("Invalid size %u of " PTR_FMT "\n",
+ end, mas_mn(&mas));
}
mas_validate_parent_slot(&mas);
@@ -7612,7 +7614,8 @@ EXPORT_SYMBOL_GPL(mt_validate);
void mas_dump(const struct ma_state *mas)
{
- pr_err("MAS: tree=%p enode=%p ", mas->tree, mas->node);
+ pr_err("MAS: tree=" PTR_FMT " enode=" PTR_FMT " ",
+ mas->tree, mas->node);
switch (mas->status) {
case ma_active:
pr_err("(ma_active)");
@@ -7676,7 +7679,7 @@ void mas_dump(const struct ma_state *mas)
pr_err("[%u/%u] index=%lx last=%lx\n", mas->offset, mas->end,
mas->index, mas->last);
- pr_err(" min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n",
+ pr_err(" min=%lx max=%lx alloc=" PTR_FMT ", depth=%u, flags=%x\n",
mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags);
if (mas->index > mas->last)
pr_err("Check index & last\n");
@@ -7685,7 +7688,7 @@ EXPORT_SYMBOL_GPL(mas_dump);
void mas_wr_dump(const struct ma_wr_state *wr_mas)
{
- pr_err("WR_MAS: node=%p r_min=%lx r_max=%lx\n",
+ pr_err("WR_MAS: node=" PTR_FMT " r_min=%lx r_max=%lx\n",
wr_mas->node, wr_mas->r_min, wr_mas->r_max);
pr_err(" type=%u off_end=%u, node_end=%u, end_piv=%lx\n",
wr_mas->type, wr_mas->offset_end, wr_mas->mas->end,
diff --git a/lib/min_heap.c b/lib/min_heap.c
new file mode 100644
index 000000000000..4485372ff3b1
--- /dev/null
+++ b/lib/min_heap.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/export.h>
+#include <linux/min_heap.h>
+
+void __min_heap_init(min_heap_char *heap, void *data, int size)
+{
+ __min_heap_init_inline(heap, data, size);
+}
+EXPORT_SYMBOL(__min_heap_init);
+
+void *__min_heap_peek(struct min_heap_char *heap)
+{
+ return __min_heap_peek_inline(heap);
+}
+EXPORT_SYMBOL(__min_heap_peek);
+
+bool __min_heap_full(min_heap_char *heap)
+{
+ return __min_heap_full_inline(heap);
+}
+EXPORT_SYMBOL(__min_heap_full);
+
+void __min_heap_sift_down(min_heap_char *heap, int pos, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args)
+{
+ __min_heap_sift_down_inline(heap, pos, elem_size, func, args);
+}
+EXPORT_SYMBOL(__min_heap_sift_down);
+
+void __min_heap_sift_up(min_heap_char *heap, size_t elem_size, size_t idx,
+ const struct min_heap_callbacks *func, void *args)
+{
+ __min_heap_sift_up_inline(heap, elem_size, idx, func, args);
+}
+EXPORT_SYMBOL(__min_heap_sift_up);
+
+void __min_heapify_all(min_heap_char *heap, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args)
+{
+ __min_heapify_all_inline(heap, elem_size, func, args);
+}
+EXPORT_SYMBOL(__min_heapify_all);
+
+bool __min_heap_pop(min_heap_char *heap, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args)
+{
+ return __min_heap_pop_inline(heap, elem_size, func, args);
+}
+EXPORT_SYMBOL(__min_heap_pop);
+
+void __min_heap_pop_push(min_heap_char *heap, const void *element, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args)
+{
+ __min_heap_pop_push_inline(heap, element, elem_size, func, args);
+}
+EXPORT_SYMBOL(__min_heap_pop_push);
+
+bool __min_heap_push(min_heap_char *heap, const void *element, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args)
+{
+ return __min_heap_push_inline(heap, element, elem_size, func, args);
+}
+EXPORT_SYMBOL(__min_heap_push);
+
+bool __min_heap_del(min_heap_char *heap, size_t elem_size, size_t idx,
+ const struct min_heap_callbacks *func, void *args)
+{
+ return __min_heap_del_inline(heap, elem_size, idx, func, args);
+}
+EXPORT_SYMBOL(__min_heap_del);
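For orientation, a minimal caller sketch against the wrappers exported above; it assumes the min_heap_char type, struct min_heap_callbacks, and the "NULL .swp means use the built-in swap" behaviour from <linux/min_heap.h> (the same behaviour the updated lib/test_min_heap.c further down relies on), so treat anything not visible in this hunk as an assumption rather than part of the patch:

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sketch only: push one value, peek at it, pop it again. */
#include <linux/kernel.h>
#include <linux/min_heap.h>

static bool int_less(const void *lhs, const void *rhs, void *args)
{
	return *(const int *)lhs < *(const int *)rhs;
}

static int backing[8];			/* element storage handed to the heap */
static min_heap_char heap;		/* untyped heap; elem_size is passed per call */
static const struct min_heap_callbacks cbs = {
	.less = int_less,
	.swp  = NULL,			/* NULL selects the default element swap */
};

static void min_heap_sketch(void)
{
	int v = 42, *top;

	__min_heap_init(&heap, backing, ARRAY_SIZE(backing));
	__min_heap_push(&heap, &v, sizeof(v), &cbs, NULL);
	top = __min_heap_peek(&heap);	/* smallest element, or NULL when empty */
	if (top)
		__min_heap_pop(&heap, sizeof(v), &cbs, NULL);
}

Callers normally reach these through the typed min_heap_*() wrapper macros rather than calling the __min_heap_*() helpers directly.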
diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c
index 2abc78367dd1..5222c6393f11 100644
--- a/lib/overflow_kunit.c
+++ b/lib/overflow_kunit.c
@@ -1187,7 +1187,7 @@ static void DEFINE_FLEX_test(struct kunit *test)
{
/* Using _RAW_ on a __counted_by struct will initialize "counter" to zero */
DEFINE_RAW_FLEX(struct foo, two_but_zero, array, 2);
-#if __has_attribute(__counted_by__)
+#ifdef CONFIG_CC_HAS_COUNTED_BY
int expected_raw_size = sizeof(struct foo);
#else
int expected_raw_size = sizeof(struct foo) + 2 * sizeof(s16);
diff --git a/lib/percpu_test.c b/lib/percpu_test.c
index 4a3d70bbc1a0..ce7124b16dab 100644
--- a/lib/percpu_test.c
+++ b/lib/percpu_test.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/limits.h>
#include <linux/module.h>
/* validate @native and @pcp counter values match @expected */
@@ -24,8 +25,9 @@ static int __init percpu_test_init(void)
* +ul_one/-ul_one below would replace with inc/dec instructions.
*/
volatile unsigned int ui_one = 1;
- long l = 0;
+ unsigned long long ull = 0;
unsigned long ul = 0;
+ long l = 0;
pr_info("percpu test start\n");
@@ -112,6 +114,13 @@ static int __init percpu_test_init(void)
CHECK(ul, ulong_counter, -1);
CHECK(ul, ulong_counter, ULONG_MAX);
+ ul = ull = 0;
+ __this_cpu_write(ulong_counter, 0);
+
+ ul = ull += UINT_MAX;
+ __this_cpu_add(ulong_counter, ull);
+ CHECK(ul, ulong_counter, UINT_MAX);
+
ul = 3;
__this_cpu_write(ulong_counter, 3);
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 473b2646f71c..5bb6b8aff232 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -474,14 +474,14 @@ int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,
return -EOPNOTSUPP;
if (sgt_append->prv) {
- unsigned long next_pfn = (page_to_phys(sg_page(sgt_append->prv)) +
- sgt_append->prv->offset + sgt_append->prv->length) / PAGE_SIZE;
+ unsigned long next_pfn;
if (WARN_ON(offset))
return -EINVAL;
/* Merge contiguous pages into the last SG */
prv_len = sgt_append->prv->length;
+ next_pfn = (sg_phys(sgt_append->prv) + prv_len) / PAGE_SIZE;
if (page_to_pfn(pages[0]) == next_pfn) {
last_pg = pfn_to_page(next_pfn - 1);
while (n_pages && pages_are_mergeable(pages[0], last_pg)) {
diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
index 33564f965958..f11691315c2f 100644
--- a/lib/slub_kunit.c
+++ b/lib/slub_kunit.c
@@ -192,6 +192,47 @@ static void test_leak_destroy(struct kunit *test)
KUNIT_EXPECT_EQ(test, 2, slab_errors);
}
+static void test_krealloc_redzone_zeroing(struct kunit *test)
+{
+ u8 *p;
+ int i;
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_krealloc", 64,
+ SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);
+
+ p = alloc_hooks(__kmalloc_cache_noprof(s, GFP_KERNEL, 48));
+ memset(p, 0xff, 48);
+
+ kasan_disable_current();
+ OPTIMIZER_HIDE_VAR(p);
+
+ /* Test shrink */
+ p = krealloc(p, 40, GFP_KERNEL | __GFP_ZERO);
+ for (i = 40; i < 64; i++)
+ KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);
+
+ /* Test grow within the same 64B kmalloc object */
+ p = krealloc(p, 56, GFP_KERNEL | __GFP_ZERO);
+ for (i = 40; i < 56; i++)
+ KUNIT_EXPECT_EQ(test, p[i], 0);
+ for (i = 56; i < 64; i++)
+ KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 0, slab_errors);
+
+ memset(p, 0xff, 56);
+ /* Test grow with allocating a bigger 128B object */
+ p = krealloc(p, 112, GFP_KERNEL | __GFP_ZERO);
+ for (i = 0; i < 56; i++)
+ KUNIT_EXPECT_EQ(test, p[i], 0xff);
+ for (i = 56; i < 112; i++)
+ KUNIT_EXPECT_EQ(test, p[i], 0);
+
+ kfree(p);
+ kasan_enable_current();
+ kmem_cache_destroy(s);
+}
+
static int test_init(struct kunit *test)
{
slab_errors = 0;
@@ -214,6 +255,7 @@ static struct kunit_case test_cases[] = {
KUNIT_CASE(test_kmalloc_redzone_access),
KUNIT_CASE(test_kfree_rcu),
KUNIT_CASE(test_leak_destroy),
+ KUNIT_CASE(test_krealloc_redzone_zeroing),
{}
};
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 4f887aa62fa0..91fa37b5c510 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -57,7 +57,7 @@ int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
static const unsigned int rounding[] = { 500, 50, 5 };
int i = 0, j;
u32 remainder = 0, sf_cap;
- char tmp[8];
+ char tmp[12];
const char *unit;
tmp[0] = '\0';
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 989a12a67872..6dc234913dd5 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -120,6 +120,9 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
if (unlikely(count <= 0))
return 0;
+ kasan_check_write(dst, count);
+ check_object_size(dst, count, false);
+
if (can_do_masked_user_access()) {
long retval;
@@ -142,8 +145,6 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
if (max > count)
max = count;
- kasan_check_write(dst, count);
- check_object_size(dst, count, false);
if (user_read_access_begin(src, max)) {
retval = do_strncpy_from_user(dst, src, count, max);
user_read_access_end();
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 31561e0e1a0d..704cb1093ae8 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -1387,6 +1387,92 @@ static noinline void __init check_prev_entry(struct maple_tree *mt)
mas_unlock(&mas);
}
+static noinline void __init check_store_null(struct maple_tree *mt)
+{
+ MA_STATE(mas, mt, 0, ULONG_MAX);
+
+ /*
+	 * Storing NULL at range [0, ULONG_MAX] in an empty tree should result
+	 * in an empty tree
+ */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ mas_lock(&mas);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ MT_BUG_ON(mt, !mtree_empty(mt));
+ mas_unlock(&mas);
+ mtree_destroy(mt);
+
+ /*
+	 * Storing NULL at any range in an empty tree should result in an empty
+ * tree
+ */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ mas_lock(&mas);
+ mas_set_range(&mas, 3, 10);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ MT_BUG_ON(mt, !mtree_empty(mt));
+ mas_unlock(&mas);
+ mtree_destroy(mt);
+
+ /*
+	 * Storing NULL at range [0, ULONG_MAX] in a single-entry tree should
+	 * result in an empty tree
+ */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ mas_lock(&mas);
+ mas_set(&mas, 0);
+ mas_store_gfp(&mas, &mas, GFP_KERNEL);
+ mas_set_range(&mas, 0, ULONG_MAX);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ MT_BUG_ON(mt, !mtree_empty(mt));
+ mas_unlock(&mas);
+ mtree_destroy(mt);
+
+ /*
+	 * Storing NULL at range [0, n] in a single-entry tree should
+	 * result in an empty tree
+ */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ mas_lock(&mas);
+ mas_set(&mas, 0);
+ mas_store_gfp(&mas, &mas, GFP_KERNEL);
+ mas_set_range(&mas, 0, 5);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ MT_BUG_ON(mt, !mtree_empty(mt));
+ mas_unlock(&mas);
+ mtree_destroy(mt);
+
+ /*
+	 * Storing NULL at range [m, n] where m > 0 in a single-entry tree
+	 * should still leave a single-entry tree
+ */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ mas_lock(&mas);
+ mas_set(&mas, 0);
+ mas_store_gfp(&mas, &mas, GFP_KERNEL);
+ mas_set_range(&mas, 2, 5);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ MT_BUG_ON(mt, mtree_empty(mt));
+// MT_BUG_ON(mt, xa_is_node(mas_root(&mas)));
+ mas_unlock(&mas);
+ mtree_destroy(mt);
+
+ /*
+	 * Storing NULL at range [0, ULONG_MAX] in a tree with nodes should
+	 * result in an empty tree
+ */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ mas_lock(&mas);
+ mas_set_range(&mas, 1, 3);
+ mas_store_gfp(&mas, &mas, GFP_KERNEL);
+// MT_BUG_ON(mt, !xa_is_node(mas_root(&mas)));
+ mas_set_range(&mas, 0, ULONG_MAX);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ MT_BUG_ON(mt, !mtree_empty(mt));
+ mas_unlock(&mas);
+ mtree_destroy(mt);
+}
+
static noinline void __init check_root_expand(struct maple_tree *mt)
{
MA_STATE(mas, mt, 0, 0);
@@ -3711,6 +3797,10 @@ static int __init maple_tree_seed(void)
#endif
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_store_null(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_root_expand(&tree);
mtree_destroy(&tree);
diff --git a/lib/test_min_heap.c b/lib/test_min_heap.c
index 64c877e73b64..e6fbb798558b 100644
--- a/lib/test_min_heap.c
+++ b/lib/test_min_heap.c
@@ -23,14 +23,6 @@ static __init bool greater_than(const void *lhs, const void *rhs, void __always_
return *(int *)lhs > *(int *)rhs;
}
-static __init void swap_ints(void *lhs, void *rhs, void __always_unused *args)
-{
- int temp = *(int *)lhs;
-
- *(int *)lhs = *(int *)rhs;
- *(int *)rhs = temp;
-}
-
static __init int pop_verify_heap(bool min_heap,
struct min_heap_test *heap,
const struct min_heap_callbacks *funcs)
@@ -72,7 +64,7 @@ static __init int test_heapify_all(bool min_heap)
};
struct min_heap_callbacks funcs = {
.less = min_heap ? less_than : greater_than,
- .swp = swap_ints,
+ .swp = NULL,
};
int i, err;
@@ -104,7 +96,7 @@ static __init int test_heap_push(bool min_heap)
};
struct min_heap_callbacks funcs = {
.less = min_heap ? less_than : greater_than,
- .swp = swap_ints,
+ .swp = NULL,
};
int i, temp, err;
@@ -136,7 +128,7 @@ static __init int test_heap_pop_push(bool min_heap)
};
struct min_heap_callbacks funcs = {
.less = min_heap ? less_than : greater_than,
- .swp = swap_ints,
+ .swp = NULL,
};
int i, temp, err;
@@ -175,7 +167,7 @@ static __init int test_heap_del(bool min_heap)
heap.nr = ARRAY_SIZE(values);
struct min_heap_callbacks funcs = {
.less = min_heap ? less_than : greater_than,
- .swp = swap_ints,
+ .swp = NULL,
};
int i, err;
diff --git a/lib/tests/Makefile b/lib/tests/Makefile
new file mode 100644
index 000000000000..8e4f42cb9c54
--- /dev/null
+++ b/lib/tests/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_TEST_RUNTIME_MODULE) += module/
diff --git a/lib/tests/module/.gitignore b/lib/tests/module/.gitignore
new file mode 100644
index 000000000000..8be7891b250f
--- /dev/null
+++ b/lib/tests/module/.gitignore
@@ -0,0 +1,4 @@
+test_kallsyms_a.c
+test_kallsyms_b.c
+test_kallsyms_c.c
+test_kallsyms_d.c
diff --git a/lib/tests/module/Makefile b/lib/tests/module/Makefile
new file mode 100644
index 000000000000..af5c27b996cb
--- /dev/null
+++ b/lib/tests/module/Makefile
@@ -0,0 +1,15 @@
+obj-$(CONFIG_TEST_KALLSYMS_A) += test_kallsyms_a.o
+obj-$(CONFIG_TEST_KALLSYMS_B) += test_kallsyms_b.o
+obj-$(CONFIG_TEST_KALLSYMS_C) += test_kallsyms_c.o
+obj-$(CONFIG_TEST_KALLSYMS_D) += test_kallsyms_d.o
+
+$(obj)/%.c: FORCE
+ @$(kecho) " GEN $@"
+ $(Q)$(srctree)/lib/tests/module/gen_test_kallsyms.sh $@\
+ $(CONFIG_TEST_KALLSYMS_NUMSYMS) \
+ $(CONFIG_TEST_KALLSYMS_SCALE_FACTOR)
+
+clean-files += test_kallsyms_a.c
+clean-files += test_kallsyms_b.c
+clean-files += test_kallsyms_c.c
+clean-files += test_kallsyms_d.c
diff --git a/lib/tests/module/gen_test_kallsyms.sh b/lib/tests/module/gen_test_kallsyms.sh
new file mode 100755
index 000000000000..3f2c626350ad
--- /dev/null
+++ b/lib/tests/module/gen_test_kallsyms.sh
@@ -0,0 +1,129 @@
+#!/bin/bash
+
+TARGET=$(basename $1)
+DIR=lib/tests/module
+TARGET="$DIR/$TARGET"
+NUM_SYMS=$2
+SCALE_FACTOR=$3
+TEST_TYPE=$(echo $TARGET | sed -e 's|lib/tests/module/test_kallsyms_||g')
+TEST_TYPE=$(echo $TEST_TYPE | sed -e 's|\.c||g')
+
+gen_template_module_header()
+{
+ cat <<____END_MODULE
+// SPDX-License-Identifier: GPL-2.0-or-later OR copyleft-next-0.3.1
+/*
+ * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org>
+ *
+ * Automatically generated code for testing, do not edit manually.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+
+____END_MODULE
+}
+
+gen_num_syms()
+{
+ PREFIX=$1
+ NUM=$2
+ for i in $(seq 1 $NUM); do
+ printf "int auto_test_%s_%010d = 0;\n" $PREFIX $i
+ printf "EXPORT_SYMBOL_GPL(auto_test_%s_%010d);\n" $PREFIX $i
+ done
+ echo
+}
+
+gen_template_module_data_a()
+{
+ gen_num_syms a $1
+ cat <<____END_MODULE
+static int auto_runtime_test(void)
+{
+ return 0;
+}
+
+____END_MODULE
+}
+
+gen_template_module_data_b()
+{
+ printf "\nextern int auto_test_a_%010d;\n\n" 28
+ echo "static int auto_runtime_test(void)"
+ echo "{"
+ printf "\nreturn auto_test_a_%010d;\n" 28
+ echo "}"
+}
+
+gen_template_module_data_c()
+{
+ gen_num_syms c $1
+ cat <<____END_MODULE
+static int auto_runtime_test(void)
+{
+ return 0;
+}
+
+____END_MODULE
+}
+
+gen_template_module_data_d()
+{
+ gen_num_syms d $1
+ cat <<____END_MODULE
+static int auto_runtime_test(void)
+{
+ return 0;
+}
+
+____END_MODULE
+}
+
+gen_template_module_exit()
+{
+ cat <<____END_MODULE
+static int __init auto_test_module_init(void)
+{
+ return auto_runtime_test();
+}
+module_init(auto_test_module_init);
+
+static void __exit auto_test_module_exit(void)
+{
+}
+module_exit(auto_test_module_exit);
+
+MODULE_AUTHOR("Luis Chamberlain <mcgrof@kernel.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Test module for kallsyms");
+____END_MODULE
+}
+
+case $TEST_TYPE in
+ a)
+ gen_template_module_header > $TARGET
+ gen_template_module_data_a $NUM_SYMS >> $TARGET
+ gen_template_module_exit >> $TARGET
+ ;;
+ b)
+ gen_template_module_header > $TARGET
+ gen_template_module_data_b >> $TARGET
+ gen_template_module_exit >> $TARGET
+ ;;
+ c)
+ gen_template_module_header > $TARGET
+ gen_template_module_data_c $((NUM_SYMS * SCALE_FACTOR)) >> $TARGET
+ gen_template_module_exit >> $TARGET
+ ;;
+ d)
+ gen_template_module_header > $TARGET
+ gen_template_module_data_d $((NUM_SYMS * SCALE_FACTOR * 2)) >> $TARGET
+ gen_template_module_exit >> $TARGET
+ ;;
+ *)
+ ;;
+esac
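Pieced together from the generators above, the distinctive part of the emitted test_kallsyms_b.c is a single extern reference that makes module b's init path depend on a symbol exported by test_kallsyms_a (the shared header and module_init()/module_exit() boilerplate come from gen_template_module_header() and gen_template_module_exit(); whitespace tidied here for readability):

extern int auto_test_a_0000000028;

static int auto_runtime_test(void)
{
	return auto_test_a_0000000028;
}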
diff --git a/lib/util_macros_kunit.c b/lib/util_macros_kunit.c
new file mode 100644
index 000000000000..94cc9f0de50a
--- /dev/null
+++ b/lib/util_macros_kunit.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Test cases for util_macros.h helpers.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/util_macros.h>
+
+#define FIND_CLOSEST_RANGE_CHECK(from, to, array, exp_idx) \
+{ \
+ int i; \
+ for (i = from; i <= to; i++) { \
+ int found = find_closest(i, array, ARRAY_SIZE(array)); \
+ KUNIT_ASSERT_EQ(ctx, exp_idx, found); \
+ } \
+}
+
+static void test_find_closest(struct kunit *ctx)
+{
+ /* This will test a few arrays that are found in drivers */
+ static const int ina226_avg_tab[] = { 1, 4, 16, 64, 128, 256, 512, 1024 };
+ static const unsigned int ad7616_oversampling_avail[] = {
+ 1, 2, 4, 8, 16, 32, 64, 128,
+ };
+ static u32 wd_timeout_table[] = { 2, 4, 6, 8, 16, 32, 48, 64 };
+ static int array_prog1a[] = { 1, 2, 3, 4, 5 };
+ static u32 array_prog1b[] = { 2, 3, 4, 5, 6 };
+ static int array_prog1mix[] = { -2, -1, 0, 1, 2 };
+ static int array_prog2a[] = { 1, 3, 5, 7 };
+ static u32 array_prog2b[] = { 2, 4, 6, 8 };
+ static int array_prog3a[] = { 1, 4, 7, 10 };
+ static u32 array_prog3b[] = { 2, 5, 8, 11 };
+ static int array_prog4a[] = { 1, 5, 9, 13 };
+ static u32 array_prog4b[] = { 2, 6, 10, 14 };
+
+ FIND_CLOSEST_RANGE_CHECK(-3, 2, ina226_avg_tab, 0);
+ FIND_CLOSEST_RANGE_CHECK(3, 10, ina226_avg_tab, 1);
+ FIND_CLOSEST_RANGE_CHECK(11, 40, ina226_avg_tab, 2);
+ FIND_CLOSEST_RANGE_CHECK(41, 96, ina226_avg_tab, 3);
+ FIND_CLOSEST_RANGE_CHECK(97, 192, ina226_avg_tab, 4);
+ FIND_CLOSEST_RANGE_CHECK(193, 384, ina226_avg_tab, 5);
+ FIND_CLOSEST_RANGE_CHECK(385, 768, ina226_avg_tab, 6);
+ FIND_CLOSEST_RANGE_CHECK(769, 2048, ina226_avg_tab, 7);
+
+ /* The array that found the bug that caused this kunit to exist */
+ FIND_CLOSEST_RANGE_CHECK(-3, 1, ad7616_oversampling_avail, 0);
+ FIND_CLOSEST_RANGE_CHECK(2, 3, ad7616_oversampling_avail, 1);
+ FIND_CLOSEST_RANGE_CHECK(4, 6, ad7616_oversampling_avail, 2);
+ FIND_CLOSEST_RANGE_CHECK(7, 12, ad7616_oversampling_avail, 3);
+ FIND_CLOSEST_RANGE_CHECK(13, 24, ad7616_oversampling_avail, 4);
+ FIND_CLOSEST_RANGE_CHECK(25, 48, ad7616_oversampling_avail, 5);
+ FIND_CLOSEST_RANGE_CHECK(49, 96, ad7616_oversampling_avail, 6);
+ FIND_CLOSEST_RANGE_CHECK(97, 256, ad7616_oversampling_avail, 7);
+
+ FIND_CLOSEST_RANGE_CHECK(-3, 3, wd_timeout_table, 0);
+ FIND_CLOSEST_RANGE_CHECK(4, 5, wd_timeout_table, 1);
+ FIND_CLOSEST_RANGE_CHECK(6, 7, wd_timeout_table, 2);
+ FIND_CLOSEST_RANGE_CHECK(8, 12, wd_timeout_table, 3);
+ FIND_CLOSEST_RANGE_CHECK(13, 24, wd_timeout_table, 4);
+ FIND_CLOSEST_RANGE_CHECK(25, 40, wd_timeout_table, 5);
+ FIND_CLOSEST_RANGE_CHECK(41, 56, wd_timeout_table, 6);
+ FIND_CLOSEST_RANGE_CHECK(57, 128, wd_timeout_table, 7);
+
+ /* One could argue that find_closest() should not be used for monotonic
+ * arrays (like 1,2,3,4,5), but even so, it should work as long as the
+ * array is sorted ascending. */
+ FIND_CLOSEST_RANGE_CHECK(-3, 1, array_prog1a, 0);
+ FIND_CLOSEST_RANGE_CHECK(2, 2, array_prog1a, 1);
+ FIND_CLOSEST_RANGE_CHECK(3, 3, array_prog1a, 2);
+ FIND_CLOSEST_RANGE_CHECK(4, 4, array_prog1a, 3);
+ FIND_CLOSEST_RANGE_CHECK(5, 8, array_prog1a, 4);
+
+ FIND_CLOSEST_RANGE_CHECK(-3, 2, array_prog1b, 0);
+ FIND_CLOSEST_RANGE_CHECK(3, 3, array_prog1b, 1);
+ FIND_CLOSEST_RANGE_CHECK(4, 4, array_prog1b, 2);
+ FIND_CLOSEST_RANGE_CHECK(5, 5, array_prog1b, 3);
+ FIND_CLOSEST_RANGE_CHECK(6, 8, array_prog1b, 4);
+
+ FIND_CLOSEST_RANGE_CHECK(-4, -2, array_prog1mix, 0);
+ FIND_CLOSEST_RANGE_CHECK(-1, -1, array_prog1mix, 1);
+ FIND_CLOSEST_RANGE_CHECK(0, 0, array_prog1mix, 2);
+ FIND_CLOSEST_RANGE_CHECK(1, 1, array_prog1mix, 3);
+ FIND_CLOSEST_RANGE_CHECK(2, 5, array_prog1mix, 4);
+
+ FIND_CLOSEST_RANGE_CHECK(-3, 2, array_prog2a, 0);
+ FIND_CLOSEST_RANGE_CHECK(3, 4, array_prog2a, 1);
+ FIND_CLOSEST_RANGE_CHECK(5, 6, array_prog2a, 2);
+ FIND_CLOSEST_RANGE_CHECK(7, 10, array_prog2a, 3);
+
+ FIND_CLOSEST_RANGE_CHECK(-3, 3, array_prog2b, 0);
+ FIND_CLOSEST_RANGE_CHECK(4, 5, array_prog2b, 1);
+ FIND_CLOSEST_RANGE_CHECK(6, 7, array_prog2b, 2);
+ FIND_CLOSEST_RANGE_CHECK(8, 10, array_prog2b, 3);
+
+ FIND_CLOSEST_RANGE_CHECK(-3, 2, array_prog3a, 0);
+ FIND_CLOSEST_RANGE_CHECK(3, 5, array_prog3a, 1);
+ FIND_CLOSEST_RANGE_CHECK(6, 8, array_prog3a, 2);
+ FIND_CLOSEST_RANGE_CHECK(9, 20, array_prog3a, 3);
+
+ FIND_CLOSEST_RANGE_CHECK(-3, 3, array_prog3b, 0);
+ FIND_CLOSEST_RANGE_CHECK(4, 6, array_prog3b, 1);
+ FIND_CLOSEST_RANGE_CHECK(7, 9, array_prog3b, 2);
+ FIND_CLOSEST_RANGE_CHECK(10, 20, array_prog3b, 3);
+
+ FIND_CLOSEST_RANGE_CHECK(-3, 3, array_prog4a, 0);
+ FIND_CLOSEST_RANGE_CHECK(4, 7, array_prog4a, 1);
+ FIND_CLOSEST_RANGE_CHECK(8, 11, array_prog4a, 2);
+ FIND_CLOSEST_RANGE_CHECK(12, 20, array_prog4a, 3);
+
+ FIND_CLOSEST_RANGE_CHECK(-3, 4, array_prog4b, 0);
+ FIND_CLOSEST_RANGE_CHECK(5, 8, array_prog4b, 1);
+ FIND_CLOSEST_RANGE_CHECK(9, 12, array_prog4b, 2);
+ FIND_CLOSEST_RANGE_CHECK(13, 20, array_prog4b, 3);
+}
+
+#define FIND_CLOSEST_DESC_RANGE_CHECK(from, to, array, exp_idx) \
+{ \
+ int i; \
+ for (i = from; i <= to; i++) { \
+ int found = find_closest_descending(i, array, \
+ ARRAY_SIZE(array)); \
+ KUNIT_ASSERT_EQ(ctx, exp_idx, found); \
+ } \
+}
+
+static void test_find_closest_descending(struct kunit *ctx)
+{
+ /* Same arrays as 'test_find_closest' but reversed */
+ static const int ina226_avg_tab[] = { 1024, 512, 256, 128, 64, 16, 4, 1 };
+ static const unsigned int ad7616_oversampling_avail[] = {
+ 128, 64, 32, 16, 8, 4, 2, 1
+ };
+ static u32 wd_timeout_table[] = { 64, 48, 32, 16, 8, 6, 4, 2 };
+ static int array_prog1a[] = { 5, 4, 3, 2, 1 };
+ static u32 array_prog1b[] = { 6, 5, 4, 3, 2 };
+ static int array_prog1mix[] = { 2, 1, 0, -1, -2 };
+ static int array_prog2a[] = { 7, 5, 3, 1 };
+ static u32 array_prog2b[] = { 8, 6, 4, 2 };
+ static int array_prog3a[] = { 10, 7, 4, 1 };
+ static u32 array_prog3b[] = { 11, 8, 5, 2 };
+ static int array_prog4a[] = { 13, 9, 5, 1 };
+ static u32 array_prog4b[] = { 14, 10, 6, 2 };
+
+ FIND_CLOSEST_DESC_RANGE_CHECK(-3, 2, ina226_avg_tab, 7);
+ FIND_CLOSEST_DESC_RANGE_CHECK(3, 10, ina226_avg_tab, 6);
+ FIND_CLOSEST_DESC_RANGE_CHECK(11, 40, ina226_avg_tab, 5);
+ FIND_CLOSEST_DESC_RANGE_CHECK(41, 96, ina226_avg_tab, 4);
+ FIND_CLOSEST_DESC_RANGE_CHECK(97, 192, ina226_avg_tab, 3);
+ FIND_CLOSEST_DESC_RANGE_CHECK(193, 384, ina226_avg_tab, 2);
+ FIND_CLOSEST_DESC_RANGE_CHECK(385, 768, ina226_avg_tab, 1);
+ FIND_CLOSEST_DESC_RANGE_CHECK(769, 2048, ina226_avg_tab, 0);
+
+ FIND_CLOSEST_DESC_RANGE_CHECK(-3, 1, ad7616_oversampling_avail, 7);
+ FIND_CLOSEST_DESC_RANGE_CHECK(2, 3, ad7616_oversampling_avail, 6);
+ FIND_CLOSEST_DESC_RANGE_CHECK(4, 6, ad7616_oversampling_avail, 5);
+ FIND_CLOSEST_DESC_RANGE_CHECK(7, 12, ad7616_oversampling_avail, 4);
+ FIND_CLOSEST_DESC_RANGE_CHECK(13, 24, ad7616_oversampling_avail, 3);
+ FIND_CLOSEST_DESC_RANGE_CHECK(25, 48, ad7616_oversampling_avail, 2);
+ FIND_CLOSEST_DESC_RANGE_CHECK(49, 96, ad7616_oversampling_avail, 1);
+ FIND_CLOSEST_DESC_RANGE_CHECK(97, 256, ad7616_oversampling_avail, 0);
+
+ FIND_CLOSEST_DESC_RANGE_CHECK(-3, 3, wd_timeout_table, 7);
+ FIND_CLOSEST_DESC_RANGE_CHECK(4, 5, wd_timeout_table, 6);
+ FIND_CLOSEST_DESC_RANGE_CHECK(6, 7, wd_timeout_table, 5);
+ FIND_CLOSEST_DESC_RANGE_CHECK(8, 12, wd_timeout_table, 4);
+ FIND_CLOSEST_DESC_RANGE_CHECK(13, 24, wd_timeout_table, 3);
+ FIND_CLOSEST_DESC_RANGE_CHECK(25, 40, wd_timeout_table, 2);
+ FIND_CLOSEST_DESC_RANGE_CHECK(41, 56, wd_timeout_table, 1);
+ FIND_CLOSEST_DESC_RANGE_CHECK(57, 128, wd_timeout_table, 0);
+
+ /* One could argue that find_closest_descending() should not be used
+ * for monotonic arrays (like 5,4,3,2,1), but even so, it should still
+	 * work as long as the array is sorted descending. */
+ FIND_CLOSEST_DESC_RANGE_CHECK(-3, 1, array_prog1a, 4);
+ FIND_CLOSEST_DESC_RANGE_CHECK(2, 2, array_prog1a, 3);
+ FIND_CLOSEST_DESC_RANGE_CHECK(3, 3, array_prog1a, 2);
+ FIND_CLOSEST_DESC_RANGE_CHECK(4, 4, array_prog1a, 1);
+ FIND_CLOSEST_DESC_RANGE_CHECK(5, 8, array_prog1a, 0);
+
+ FIND_CLOSEST_DESC_RANGE_CHECK(-3, 2, array_prog1b, 4);
+ FIND_CLOSEST_DESC_RANGE_CHECK(3, 3, array_prog1b, 3);
+ FIND_CLOSEST_DESC_RANGE_CHECK(4, 4, array_prog1b, 2);
+ FIND_CLOSEST_DESC_RANGE_CHECK(5, 5, array_prog1b, 1);
+ FIND_CLOSEST_DESC_RANGE_CHECK(6, 8, array_prog1b, 0);
+
+ FIND_CLOSEST_DESC_RANGE_CHECK(-4, -2, array_prog1mix, 4);
+ FIND_CLOSEST_DESC_RANGE_CHECK(-1, -1, array_prog1mix, 3);
+ FIND_CLOSEST_DESC_RANGE_CHECK(0, 0, array_prog1mix, 2);
+ FIND_CLOSEST_DESC_RANGE_CHECK(1, 1, array_prog1mix, 1);
+ FIND_CLOSEST_DESC_RANGE_CHECK(2, 5, array_prog1mix, 0);
+
+ FIND_CLOSEST_DESC_RANGE_CHECK(-3, 2, array_prog2a, 3);
+ FIND_CLOSEST_DESC_RANGE_CHECK(3, 4, array_prog2a, 2);
+ FIND_CLOSEST_DESC_RANGE_CHECK(5, 6, array_prog2a, 1);
+ FIND_CLOSEST_DESC_RANGE_CHECK(7, 10, array_prog2a, 0);
+
+ FIND_CLOSEST_DESC_RANGE_CHECK(-3, 3, array_prog2b, 3);
+ FIND_CLOSEST_DESC_RANGE_CHECK(4, 5, array_prog2b, 2);
+ FIND_CLOSEST_DESC_RANGE_CHECK(6, 7, array_prog2b, 1);
+ FIND_CLOSEST_DESC_RANGE_CHECK(8, 10, array_prog2b, 0);
+
+ FIND_CLOSEST_DESC_RANGE_CHECK(-3, 2, array_prog3a, 3);
+ FIND_CLOSEST_DESC_RANGE_CHECK(3, 5, array_prog3a, 2);
+ FIND_CLOSEST_DESC_RANGE_CHECK(6, 8, array_prog3a, 1);
+ FIND_CLOSEST_DESC_RANGE_CHECK(9, 20, array_prog3a, 0);
+
+ FIND_CLOSEST_DESC_RANGE_CHECK(-3, 3, array_prog3b, 3);
+ FIND_CLOSEST_DESC_RANGE_CHECK(4, 6, array_prog3b, 2);
+ FIND_CLOSEST_DESC_RANGE_CHECK(7, 9, array_prog3b, 1);
+ FIND_CLOSEST_DESC_RANGE_CHECK(10, 20, array_prog3b, 0);
+
+ FIND_CLOSEST_DESC_RANGE_CHECK(-3, 3, array_prog4a, 3);
+ FIND_CLOSEST_DESC_RANGE_CHECK(4, 7, array_prog4a, 2);
+ FIND_CLOSEST_DESC_RANGE_CHECK(8, 11, array_prog4a, 1);
+ FIND_CLOSEST_DESC_RANGE_CHECK(12, 20, array_prog4a, 0);
+
+ FIND_CLOSEST_DESC_RANGE_CHECK(-3, 4, array_prog4b, 3);
+ FIND_CLOSEST_DESC_RANGE_CHECK(5, 8, array_prog4b, 2);
+ FIND_CLOSEST_DESC_RANGE_CHECK(9, 12, array_prog4b, 1);
+ FIND_CLOSEST_DESC_RANGE_CHECK(13, 20, array_prog4b, 0);
+}
+
+static struct kunit_case __refdata util_macros_test_cases[] = {
+ KUNIT_CASE(test_find_closest),
+ KUNIT_CASE(test_find_closest_descending),
+ {}
+};
+
+static struct kunit_suite util_macros_test_suite = {
+ .name = "util_macros.h",
+ .test_cases = util_macros_test_cases,
+};
+
+kunit_test_suites(&util_macros_test_suite);
+
+MODULE_AUTHOR("Alexandru Ardelean <aardelean@baylibre.com>");
+MODULE_DESCRIPTION("Test cases for util_macros.h helpers");
+MODULE_LICENSE("GPL");