Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug               |  45
-rw-r--r--  lib/Makefile                    |   2
-rw-r--r--  lib/alloc_tag.c                 |   6
-rw-r--r--  lib/atomic64.c                  |  78
-rw-r--r--  lib/cpumask.c                   |   5
-rw-r--r--  lib/crypto/aesgcm.c             |   2
-rw-r--r--  lib/crypto/gf128mul.c           |  75
-rw-r--r--  lib/fault-inject.c              |  28
-rw-r--r--  lib/inflate.c                   |   2
-rw-r--r--  lib/kobject.c                   |  24
-rw-r--r--  lib/kunit_iov_iter.c            |   5
-rw-r--r--  lib/list_debug.c                |  22
-rw-r--r--  lib/list_sort.c                 |   7
-rw-r--r--  lib/lz4/lz4_compress.c          |   1
-rw-r--r--  lib/lz4/lz4_decompress.c        |   1
-rw-r--r--  lib/lz4/lz4defs.h               |   4
-rw-r--r--  lib/lz4/lz4hc_compress.c        |   1
-rw-r--r--  lib/maple_tree.c                |  73
-rw-r--r--  lib/math/Makefile               |   1
-rw-r--r--  lib/math/tests/Makefile         |   1
-rw-r--r--  lib/math/tests/int_sqrt_kunit.c |  66
-rw-r--r--  lib/rhashtable.c                |  14
-rw-r--r--  lib/sort.c                      |   7
-rw-r--r--  lib/test_maple_tree.c           |  56
-rw-r--r--  lib/test_min_heap.c             |  30
-rw-r--r--  lib/test_sysctl.c               |   6
-rw-r--r--  lib/test_vmalloc.c              |   2
-rw-r--r--  lib/test_xarray.c               | 706
-rw-r--r--  lib/xarray.c                    |  78
29 files changed, 788 insertions, 560 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 5f1874622175..775966cf6114 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2269,7 +2269,6 @@ config TEST_LIST_SORT config TEST_MIN_HEAP tristate "Min heap test" depends on DEBUG_KERNEL || m - select MIN_HEAP help Enable this to turn on min heap function tests. This test is executed only once during system boot (so affects only boot time), @@ -2457,8 +2456,22 @@ config TEST_BITMAP config TEST_UUID tristate "Test functions located in the uuid module at runtime" -config TEST_XARRAY - tristate "Test the XArray code at runtime" +config XARRAY_KUNIT + tristate "KUnit test XArray code at runtime" if !KUNIT_ALL_TESTS + depends on KUNIT + default KUNIT_ALL_TESTS + help + Enable this option to test the Xarray code at boot. + + KUnit tests run during boot and output the results to the debug log + in TAP format (http://testanything.org/). Only useful for kernel devs + running the KUnit test harness, and not intended for inclusion into a + production build. + + For more information on KUnit and unit tests in general please refer + to the KUnit documentation in Documentation/dev-tools/kunit/. + + If unsure, say N. config TEST_MAPLE_TREE tristate "Test the Maple Tree code at runtime or module load" @@ -2479,6 +2492,17 @@ config TEST_RHASHTABLE config TEST_IDA tristate "Perform selftest on IDA functions" +config TEST_MISC_MINOR + tristate "Basic misc minor Kunit test" if !KUNIT_ALL_TESTS + depends on KUNIT + default KUNIT_ALL_TESTS + help + Kunit test for the misc minor. + It tests misc minor functions for dynamic and misc dynamic minor. + This include misc_xxx functions + + If unsure, say N. + config TEST_PARMAN tristate "Perform selftest on priority array manager" depends on PARMAN @@ -3172,6 +3196,21 @@ config INT_POW_TEST If unsure, say N +config INT_SQRT_KUNIT_TEST + tristate "Integer square root test" if !KUNIT_ALL_TESTS + depends on KUNIT + default KUNIT_ALL_TESTS + help + This option enables the KUnit test suite for the int_sqrt() function, + which performs square root calculation. The test suite checks + various scenarios, including edge cases, to ensure correctness. + + Enabling this option will include tests that check various scenarios + and edge cases to ensure the accuracy and reliability of the square root + function. 
+ + If unsure, say N + endif # RUNTIME_TESTING_MENU config ARCH_USE_MEMTEST diff --git a/lib/Makefile b/lib/Makefile index d5cfc7afbbb8..f1c6e9d76a7c 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -94,7 +94,6 @@ GCOV_PROFILE_test_bitmap.o := n endif obj-$(CONFIG_TEST_UUID) += test_uuid.o -obj-$(CONFIG_TEST_XARRAY) += test_xarray.o obj-$(CONFIG_TEST_MAPLE_TREE) += test_maple_tree.o obj-$(CONFIG_TEST_PARMAN) += test_parman.o obj-$(CONFIG_TEST_KMOD) += test_kmod.o @@ -373,6 +372,7 @@ CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN) obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o obj-$(CONFIG_CHECKSUM_KUNIT) += checksum_kunit.o obj-$(CONFIG_UTIL_MACROS_KUNIT) += util_macros_kunit.o +obj-$(CONFIG_XARRAY_KUNIT) += test_xarray.o obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o obj-$(CONFIG_HASHTABLE_KUNIT_TEST) += hashtable_test.o obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c index 65e706e1bc19..19b45617bdcf 100644 --- a/lib/alloc_tag.c +++ b/lib/alloc_tag.c @@ -29,6 +29,8 @@ EXPORT_SYMBOL(_shared_alloc_tag); DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT, mem_alloc_profiling_key); +EXPORT_SYMBOL(mem_alloc_profiling_key); + DEFINE_STATIC_KEY_FALSE(mem_profiling_compressed); struct alloc_tag_kernel_section kernel_tags = { NULL, 0 }; @@ -423,8 +425,8 @@ static int vm_module_tags_populate(void) unsigned long nr; more_pages = ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT; - nr = alloc_pages_bulk_array_node(GFP_KERNEL | __GFP_NOWARN, - NUMA_NO_NODE, more_pages, next_page); + nr = alloc_pages_bulk_node(GFP_KERNEL | __GFP_NOWARN, + NUMA_NO_NODE, more_pages, next_page); if (nr < more_pages || vmap_pages_range(phys_end, phys_end + (nr << PAGE_SHIFT), PAGE_KERNEL, next_page, PAGE_SHIFT) < 0) { diff --git a/lib/atomic64.c b/lib/atomic64.c index caf895789a1e..1a72bba36d24 100644 --- a/lib/atomic64.c +++ b/lib/atomic64.c @@ -25,15 +25,15 @@ * Ensure each lock is in a separate cacheline. */ static union { - raw_spinlock_t lock; + arch_spinlock_t lock; char pad[L1_CACHE_BYTES]; } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = { [0 ... 
(NR_LOCKS - 1)] = { - .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock), + .lock = __ARCH_SPIN_LOCK_UNLOCKED, }, }; -static inline raw_spinlock_t *lock_addr(const atomic64_t *v) +static inline arch_spinlock_t *lock_addr(const atomic64_t *v) { unsigned long addr = (unsigned long) v; @@ -45,12 +45,14 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v) s64 generic_atomic64_read(const atomic64_t *v) { unsigned long flags; - raw_spinlock_t *lock = lock_addr(v); + arch_spinlock_t *lock = lock_addr(v); s64 val; - raw_spin_lock_irqsave(lock, flags); + local_irq_save(flags); + arch_spin_lock(lock); val = v->counter; - raw_spin_unlock_irqrestore(lock, flags); + arch_spin_unlock(lock); + local_irq_restore(flags); return val; } EXPORT_SYMBOL(generic_atomic64_read); @@ -58,11 +60,13 @@ EXPORT_SYMBOL(generic_atomic64_read); void generic_atomic64_set(atomic64_t *v, s64 i) { unsigned long flags; - raw_spinlock_t *lock = lock_addr(v); + arch_spinlock_t *lock = lock_addr(v); - raw_spin_lock_irqsave(lock, flags); + local_irq_save(flags); + arch_spin_lock(lock); v->counter = i; - raw_spin_unlock_irqrestore(lock, flags); + arch_spin_unlock(lock); + local_irq_restore(flags); } EXPORT_SYMBOL(generic_atomic64_set); @@ -70,11 +74,13 @@ EXPORT_SYMBOL(generic_atomic64_set); void generic_atomic64_##op(s64 a, atomic64_t *v) \ { \ unsigned long flags; \ - raw_spinlock_t *lock = lock_addr(v); \ + arch_spinlock_t *lock = lock_addr(v); \ \ - raw_spin_lock_irqsave(lock, flags); \ + local_irq_save(flags); \ + arch_spin_lock(lock); \ v->counter c_op a; \ - raw_spin_unlock_irqrestore(lock, flags); \ + arch_spin_unlock(lock); \ + local_irq_restore(flags); \ } \ EXPORT_SYMBOL(generic_atomic64_##op); @@ -82,12 +88,14 @@ EXPORT_SYMBOL(generic_atomic64_##op); s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v) \ { \ unsigned long flags; \ - raw_spinlock_t *lock = lock_addr(v); \ + arch_spinlock_t *lock = lock_addr(v); \ s64 val; \ \ - raw_spin_lock_irqsave(lock, flags); \ + local_irq_save(flags); \ + arch_spin_lock(lock); \ val = (v->counter c_op a); \ - raw_spin_unlock_irqrestore(lock, flags); \ + arch_spin_unlock(lock); \ + local_irq_restore(flags); \ return val; \ } \ EXPORT_SYMBOL(generic_atomic64_##op##_return); @@ -96,13 +104,15 @@ EXPORT_SYMBOL(generic_atomic64_##op##_return); s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v) \ { \ unsigned long flags; \ - raw_spinlock_t *lock = lock_addr(v); \ + arch_spinlock_t *lock = lock_addr(v); \ s64 val; \ \ - raw_spin_lock_irqsave(lock, flags); \ + local_irq_save(flags); \ + arch_spin_lock(lock); \ val = v->counter; \ v->counter c_op a; \ - raw_spin_unlock_irqrestore(lock, flags); \ + arch_spin_unlock(lock); \ + local_irq_restore(flags); \ return val; \ } \ EXPORT_SYMBOL(generic_atomic64_fetch_##op); @@ -131,14 +141,16 @@ ATOMIC64_OPS(xor, ^=) s64 generic_atomic64_dec_if_positive(atomic64_t *v) { unsigned long flags; - raw_spinlock_t *lock = lock_addr(v); + arch_spinlock_t *lock = lock_addr(v); s64 val; - raw_spin_lock_irqsave(lock, flags); + local_irq_save(flags); + arch_spin_lock(lock); val = v->counter - 1; if (val >= 0) v->counter = val; - raw_spin_unlock_irqrestore(lock, flags); + arch_spin_unlock(lock); + local_irq_restore(flags); return val; } EXPORT_SYMBOL(generic_atomic64_dec_if_positive); @@ -146,14 +158,16 @@ EXPORT_SYMBOL(generic_atomic64_dec_if_positive); s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n) { unsigned long flags; - raw_spinlock_t *lock = lock_addr(v); + arch_spinlock_t *lock = lock_addr(v); s64 val; - 
raw_spin_lock_irqsave(lock, flags); + local_irq_save(flags); + arch_spin_lock(lock); val = v->counter; if (val == o) v->counter = n; - raw_spin_unlock_irqrestore(lock, flags); + arch_spin_unlock(lock); + local_irq_restore(flags); return val; } EXPORT_SYMBOL(generic_atomic64_cmpxchg); @@ -161,13 +175,15 @@ EXPORT_SYMBOL(generic_atomic64_cmpxchg); s64 generic_atomic64_xchg(atomic64_t *v, s64 new) { unsigned long flags; - raw_spinlock_t *lock = lock_addr(v); + arch_spinlock_t *lock = lock_addr(v); s64 val; - raw_spin_lock_irqsave(lock, flags); + local_irq_save(flags); + arch_spin_lock(lock); val = v->counter; v->counter = new; - raw_spin_unlock_irqrestore(lock, flags); + arch_spin_unlock(lock); + local_irq_restore(flags); return val; } EXPORT_SYMBOL(generic_atomic64_xchg); @@ -175,14 +191,16 @@ EXPORT_SYMBOL(generic_atomic64_xchg); s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) { unsigned long flags; - raw_spinlock_t *lock = lock_addr(v); + arch_spinlock_t *lock = lock_addr(v); s64 val; - raw_spin_lock_irqsave(lock, flags); + local_irq_save(flags); + arch_spin_lock(lock); val = v->counter; if (val != u) v->counter += a; - raw_spin_unlock_irqrestore(lock, flags); + arch_spin_unlock(lock); + local_irq_restore(flags); return val; } diff --git a/lib/cpumask.c b/lib/cpumask.c index e77ee9d46f71..57274ba8b6d9 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -83,10 +83,7 @@ EXPORT_SYMBOL(alloc_cpumask_var_node); */ void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask) { - *mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES); - if (!*mask) - panic("%s: Failed to allocate %u bytes\n", __func__, - cpumask_size()); + *mask = memblock_alloc_or_panic(cpumask_size(), SMP_CACHE_BYTES); } /** diff --git a/lib/crypto/aesgcm.c b/lib/crypto/aesgcm.c index 6bba6473fdf3..902e49410aaf 100644 --- a/lib/crypto/aesgcm.c +++ b/lib/crypto/aesgcm.c @@ -697,7 +697,7 @@ static int __init libaesgcm_init(void) u8 tagbuf[AES_BLOCK_SIZE]; int plen = aesgcm_tv[i].plen; struct aesgcm_ctx ctx; - u8 buf[sizeof(ptext12)]; + static u8 buf[sizeof(ptext12)]; if (aesgcm_expandkey(&ctx, aesgcm_tv[i].key, aesgcm_tv[i].klen, aesgcm_tv[i].clen - plen)) { diff --git a/lib/crypto/gf128mul.c b/lib/crypto/gf128mul.c index 8f8c45e0cdcf..fbe72cb3453a 100644 --- a/lib/crypto/gf128mul.c +++ b/lib/crypto/gf128mul.c @@ -225,44 +225,6 @@ void gf128mul_lle(be128 *r, const be128 *b) } EXPORT_SYMBOL(gf128mul_lle); -void gf128mul_bbe(be128 *r, const be128 *b) -{ - be128 p[8]; - int i; - - p[0] = *r; - for (i = 0; i < 7; ++i) - gf128mul_x_bbe(&p[i + 1], &p[i]); - - memset(r, 0, sizeof(*r)); - for (i = 0;;) { - u8 ch = ((u8 *)b)[i]; - - if (ch & 0x80) - be128_xor(r, r, &p[7]); - if (ch & 0x40) - be128_xor(r, r, &p[6]); - if (ch & 0x20) - be128_xor(r, r, &p[5]); - if (ch & 0x10) - be128_xor(r, r, &p[4]); - if (ch & 0x08) - be128_xor(r, r, &p[3]); - if (ch & 0x04) - be128_xor(r, r, &p[2]); - if (ch & 0x02) - be128_xor(r, r, &p[1]); - if (ch & 0x01) - be128_xor(r, r, &p[0]); - - if (++i >= 16) - break; - - gf128mul_x8_bbe(r); - } -} -EXPORT_SYMBOL(gf128mul_bbe); - /* This version uses 64k bytes of table space. A 16 byte buffer has to be multiplied by a 16 byte key value in GF(2^128). 
If we consider a GF(2^128) value in @@ -380,28 +342,6 @@ out: } EXPORT_SYMBOL(gf128mul_init_4k_lle); -struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g) -{ - struct gf128mul_4k *t; - int j, k; - - t = kzalloc(sizeof(*t), GFP_KERNEL); - if (!t) - goto out; - - t->t[1] = *g; - for (j = 1; j <= 64; j <<= 1) - gf128mul_x_bbe(&t->t[j + j], &t->t[j]); - - for (j = 2; j < 256; j += j) - for (k = 1; k < j; ++k) - be128_xor(&t->t[j + k], &t->t[j], &t->t[k]); - -out: - return t; -} -EXPORT_SYMBOL(gf128mul_init_4k_bbe); - void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t) { u8 *ap = (u8 *)a; @@ -417,20 +357,5 @@ void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t) } EXPORT_SYMBOL(gf128mul_4k_lle); -void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t) -{ - u8 *ap = (u8 *)a; - be128 r[1]; - int i = 0; - - *r = t->t[ap[0]]; - while (++i < 16) { - gf128mul_x8_bbe(r); - be128_xor(r, r, &t->t[ap[i]]); - } - *a = *r; -} -EXPORT_SYMBOL(gf128mul_4k_bbe); - MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Functions for multiplying elements of GF(2^128)"); diff --git a/lib/fault-inject.c b/lib/fault-inject.c index 52eb6ba29698..999053fa133e 100644 --- a/lib/fault-inject.c +++ b/lib/fault-inject.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only #include <linux/kernel.h> #include <linux/init.h> -#include <linux/random.h> +#include <linux/prandom.h> #include <linux/debugfs.h> #include <linux/sched.h> #include <linux/stat.h> @@ -13,6 +13,24 @@ #include <linux/fault-inject.h> /* + * The should_fail() functions use prandom instead of the normal Linux RNG + * since they don't need cryptographically secure random numbers. + */ +static DEFINE_PER_CPU(struct rnd_state, fault_rnd_state); + +static u32 fault_prandom_u32_below_100(void) +{ + struct rnd_state *state; + u32 res; + + state = &get_cpu_var(fault_rnd_state); + res = prandom_u32_state(state); + put_cpu_var(fault_rnd_state); + + return res % 100; +} + +/* * setup_fault_attr() is a helper function for various __setup handlers, so it * returns 0 on error, because that is what __setup handlers do. 
*/ @@ -31,6 +49,8 @@ int setup_fault_attr(struct fault_attr *attr, char *str) return 0; } + prandom_init_once(&fault_rnd_state); + attr->probability = probability; attr->interval = interval; atomic_set(&attr->times, times); @@ -146,7 +166,7 @@ bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags) return false; } - if (attr->probability <= get_random_u32_below(100)) + if (attr->probability <= fault_prandom_u32_below_100()) return false; fail: @@ -219,6 +239,8 @@ struct dentry *fault_create_debugfs_attr(const char *name, if (IS_ERR(dir)) return dir; + prandom_init_once(&fault_rnd_state); + debugfs_create_ul("probability", mode, dir, &attr->probability); debugfs_create_ul("interval", mode, dir, &attr->interval); debugfs_create_atomic_t("times", mode, dir, &attr->times); @@ -431,6 +453,8 @@ static const struct config_item_type fault_config_type = { void fault_config_init(struct fault_config *config, const char *name) { + prandom_init_once(&fault_rnd_state); + config_group_init_type_name(&config->group, name, &fault_config_type); } EXPORT_SYMBOL_GPL(fault_config_init); diff --git a/lib/inflate.c b/lib/inflate.c index fbaf03c1748d..eab886baa1b4 100644 --- a/lib/inflate.c +++ b/lib/inflate.c @@ -1257,8 +1257,6 @@ static int INIT gunzip(void) /* Decompress */ if ((res = inflate())) { switch (res) { - case 0: - break; case 1: error("invalid compressed format (err=1)"); break; diff --git a/lib/kobject.c b/lib/kobject.c index 72fa20f405f1..abe5f5b856ce 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -1096,30 +1096,6 @@ void *kobj_ns_grab_current(enum kobj_ns_type type) } EXPORT_SYMBOL_GPL(kobj_ns_grab_current); -const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk) -{ - const void *ns = NULL; - - spin_lock(&kobj_ns_type_lock); - if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type]) - ns = kobj_ns_ops_tbl[type]->netlink_ns(sk); - spin_unlock(&kobj_ns_type_lock); - - return ns; -} - -const void *kobj_ns_initial(enum kobj_ns_type type) -{ - const void *ns = NULL; - - spin_lock(&kobj_ns_type_lock); - if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type]) - ns = kobj_ns_ops_tbl[type]->initial_ns(); - spin_unlock(&kobj_ns_type_lock); - - return ns; -} - void kobj_ns_drop(enum kobj_ns_type type, void *ns) { spin_lock(&kobj_ns_type_lock); diff --git a/lib/kunit_iov_iter.c b/lib/kunit_iov_iter.c index 10a560feb66e..48342736d016 100644 --- a/lib/kunit_iov_iter.c +++ b/lib/kunit_iov_iter.c @@ -57,15 +57,12 @@ static void *__init iov_kunit_create_buffer(struct kunit *test, KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages); *ppages = pages; - got = alloc_pages_bulk_array(GFP_KERNEL, npages, pages); + got = alloc_pages_bulk(GFP_KERNEL, npages, pages); if (got != npages) { release_pages(pages, got); KUNIT_ASSERT_EQ(test, got, npages); } - for (int i = 0; i < npages; i++) - pages[i]->index = i; - buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buffer); diff --git a/lib/list_debug.c b/lib/list_debug.c index db602417febf..ee7eeeb8f92c 100644 --- a/lib/list_debug.c +++ b/lib/list_debug.c @@ -22,17 +22,17 @@ __list_valid_slowpath bool __list_add_valid_or_report(struct list_head *new, struct list_head *prev, struct list_head *next) { - if (CHECK_DATA_CORRUPTION(prev == NULL, + if (CHECK_DATA_CORRUPTION(prev == NULL, NULL, "list_add corruption. prev is NULL.\n") || - CHECK_DATA_CORRUPTION(next == NULL, + CHECK_DATA_CORRUPTION(next == NULL, NULL, "list_add corruption. 
next is NULL.\n") || - CHECK_DATA_CORRUPTION(next->prev != prev, + CHECK_DATA_CORRUPTION(next->prev != prev, next, "list_add corruption. next->prev should be prev (%px), but was %px. (next=%px).\n", prev, next->prev, next) || - CHECK_DATA_CORRUPTION(prev->next != next, + CHECK_DATA_CORRUPTION(prev->next != next, prev, "list_add corruption. prev->next should be next (%px), but was %px. (prev=%px).\n", next, prev->next, prev) || - CHECK_DATA_CORRUPTION(new == prev || new == next, + CHECK_DATA_CORRUPTION(new == prev || new == next, NULL, "list_add double add: new=%px, prev=%px, next=%px.\n", new, prev, next)) return false; @@ -49,20 +49,20 @@ bool __list_del_entry_valid_or_report(struct list_head *entry) prev = entry->prev; next = entry->next; - if (CHECK_DATA_CORRUPTION(next == NULL, + if (CHECK_DATA_CORRUPTION(next == NULL, NULL, "list_del corruption, %px->next is NULL\n", entry) || - CHECK_DATA_CORRUPTION(prev == NULL, + CHECK_DATA_CORRUPTION(prev == NULL, NULL, "list_del corruption, %px->prev is NULL\n", entry) || - CHECK_DATA_CORRUPTION(next == LIST_POISON1, + CHECK_DATA_CORRUPTION(next == LIST_POISON1, next, "list_del corruption, %px->next is LIST_POISON1 (%px)\n", entry, LIST_POISON1) || - CHECK_DATA_CORRUPTION(prev == LIST_POISON2, + CHECK_DATA_CORRUPTION(prev == LIST_POISON2, prev, "list_del corruption, %px->prev is LIST_POISON2 (%px)\n", entry, LIST_POISON2) || - CHECK_DATA_CORRUPTION(prev->next != entry, + CHECK_DATA_CORRUPTION(prev->next != entry, prev, "list_del corruption. prev->next should be %px, but was %px. (prev=%px)\n", entry, prev->next, prev) || - CHECK_DATA_CORRUPTION(next->prev != entry, + CHECK_DATA_CORRUPTION(next->prev != entry, next, "list_del corruption. next->prev should be %px, but was %px. (next=%px)\n", entry, next->prev, next)) return false; diff --git a/lib/list_sort.c b/lib/list_sort.c index 8d3f623536fe..a310ecb7ccc0 100644 --- a/lib/list_sort.c +++ b/lib/list_sort.c @@ -108,6 +108,13 @@ static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head, * and list_sort is a stable sort, so it is not necessary to distinguish * the @a < @b and @a == @b cases. * + * The comparison function must adhere to specific mathematical properties + * to ensure correct and stable sorting: + * - Antisymmetry: cmp(@a, @b) must return the opposite sign of + * cmp(@b, @a). + * - Transitivity: if cmp(@a, @b) <= 0 and cmp(@b, @c) <= 0, then + * cmp(@a, @c) <= 0. + * * This is compatible with two styles of @cmp function: * - The traditional style which returns <0 / =0 / >0, or * - Returning a boolean 0/1. 
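The antisymmetry and transitivity requirements documented in the list_sort.c hunk above (and again for cmp_func in the lib/sort.c hunk further down) are easy to violate by computing the result as a key subtraction, which can wrap. Below is a minimal sketch of a conforming list_sort() callback; struct item and item_cmp are hypothetical illustrations, not identifiers from this patch.

#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/types.h>

struct item {
	struct list_head node;
	u64 key;
};

/*
 * Antisymmetric and transitive because the result is derived from a
 * strict ordering of the keys. Returning "ia->key - ib->key" instead
 * would break antisymmetry once the subtraction wraps or is truncated
 * to int.
 */
static int item_cmp(void *priv, const struct list_head *a,
		    const struct list_head *b)
{
	const struct item *ia = list_entry(a, struct item, node);
	const struct item *ib = list_entry(b, struct item, node);

	if (ia->key < ib->key)
		return -1;
	return ia->key > ib->key;
}

/* Usage: list_sort(NULL, &item_list, item_cmp); */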
diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c index b0bbeeb74b9e..2a397bb2c661 100644 --- a/lib/lz4/lz4_compress.c +++ b/lib/lz4/lz4_compress.c @@ -33,7 +33,6 @@ /*-************************************ * Dependencies **************************************/ -#include <linux/lz4.h> #include "lz4defs.h" #include <linux/module.h> #include <linux/kernel.h> diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c index 0e31e6da5ce7..3a2cd9acada4 100644 --- a/lib/lz4/lz4_decompress.c +++ b/lib/lz4/lz4_decompress.c @@ -33,7 +33,6 @@ /*-************************************ * Dependencies **************************************/ -#include <linux/lz4.h> #include "lz4defs.h" #include <linux/init.h> #include <linux/module.h> diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h index cb358d6bde5a..17277ec16919 100644 --- a/lib/lz4/lz4defs.h +++ b/lib/lz4/lz4defs.h @@ -39,6 +39,7 @@ #include <linux/bitops.h> #include <linux/string.h> /* memset, memcpy */ +#include <linux/lz4.h> #define FORCE_INLINE __always_inline @@ -92,8 +93,7 @@ typedef uintptr_t uptrval; #define MB (1 << 20) #define GB (1U << 30) -#define MAXD_LOG 16 -#define MAX_DISTANCE ((1 << MAXD_LOG) - 1) +#define MAX_DISTANCE LZ4_DISTANCE_MAX #define STEPSIZE sizeof(size_t) #define ML_BITS 4 diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c index bc45594ad2a8..91936dc3d14b 100644 --- a/lib/lz4/lz4hc_compress.c +++ b/lib/lz4/lz4hc_compress.c @@ -34,7 +34,6 @@ /*-************************************ * Dependencies **************************************/ -#include <linux/lz4.h> #include "lz4defs.h" #include <linux/module.h> #include <linux/kernel.h> diff --git a/lib/maple_tree.c b/lib/maple_tree.c index 047397136f15..f7153ade1be5 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -1863,11 +1863,11 @@ static inline int mab_no_null_split(struct maple_big_node *b_node, * Return: The first split location. The middle split is set in @mid_split. */ static inline int mab_calc_split(struct ma_state *mas, - struct maple_big_node *bn, unsigned char *mid_split, unsigned long min) + struct maple_big_node *bn, unsigned char *mid_split) { unsigned char b_end = bn->b_end; int split = b_end / 2; /* Assume equal split. */ - unsigned char slot_min, slot_count = mt_slots[bn->type]; + unsigned char slot_count = mt_slots[bn->type]; /* * To support gap tracking, all NULL entries are kept together and a node cannot @@ -1900,18 +1900,7 @@ static inline int mab_calc_split(struct ma_state *mas, split = b_end / 3; *mid_split = split * 2; } else { - slot_min = mt_min_slots[bn->type]; - *mid_split = 0; - /* - * Avoid having a range less than the slot count unless it - * causes one node to be deficient. - * NOTE: mt_min_slots is 1 based, b_end and split are zero. 
- */ - while ((split < slot_count - 1) && - ((bn->pivot[split] - min) < slot_count - 1) && - (b_end - split > slot_min)) - split++; } /* Avoid ending a node on a NULL entry */ @@ -2377,7 +2366,7 @@ static inline struct maple_enode static inline unsigned char mas_mab_to_node(struct ma_state *mas, struct maple_big_node *b_node, struct maple_enode **left, struct maple_enode **right, struct maple_enode **middle, - unsigned char *mid_split, unsigned long min) + unsigned char *mid_split) { unsigned char split = 0; unsigned char slot_count = mt_slots[b_node->type]; @@ -2390,7 +2379,7 @@ static inline unsigned char mas_mab_to_node(struct ma_state *mas, if (b_node->b_end < slot_count) { split = b_node->b_end; } else { - split = mab_calc_split(mas, b_node, mid_split, min); + split = mab_calc_split(mas, b_node, mid_split); *right = mas_new_ma_node(mas, b_node); } @@ -2877,7 +2866,7 @@ static void mas_spanning_rebalance(struct ma_state *mas, mast->bn->b_end--; mast->bn->type = mte_node_type(mast->orig_l->node); split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle, - &mid_split, mast->orig_l->min); + &mid_split); mast_set_split_parents(mast, left, middle, right, split, mid_split); mast_cp_to_nodes(mast, left, middle, right, split, mid_split); @@ -3365,7 +3354,7 @@ static void mas_split(struct ma_state *mas, struct maple_big_node *b_node) if (mas_push_data(mas, height, &mast, false)) break; - split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min); + split = mab_calc_split(mas, b_node, &mid_split); mast_split_data(&mast, mas, split); /* * Usually correct, mab_mas_cp in the above call overwrites @@ -4746,29 +4735,6 @@ again: } /* - * mas_next_entry() - Internal function to get the next entry. - * @mas: The maple state - * @limit: The maximum range start. - * - * Set the @mas->node to the next entry and the range_start to - * the beginning value for the entry. Does not check beyond @limit. - * Sets @mas->index and @mas->last to the range, Does not update @mas->index and - * @mas->last on overflow. - * Restarts on dead nodes. - * - * Return: the next entry or %NULL. - */ -static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit) -{ - if (mas->last >= limit) { - mas->status = ma_overflow; - return NULL; - } - - return mas_next_slot(mas, limit, false); -} - -/* * mas_rev_awalk() - Internal function. Reverse allocation walk. Find the * highest gap address of a given size in a given node and descend. * @mas: The maple state @@ -4903,15 +4869,14 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size) if (gap >= size) { if (ma_is_leaf(type)) { found = true; - goto done; - } - if (mas->index <= pivot) { - mas->node = mas_slot(mas, slots, offset); - mas->min = min; - mas->max = pivot; - offset = 0; break; } + + mas->node = mas_slot(mas, slots, offset); + mas->min = min; + mas->max = pivot; + offset = 0; + break; } next_slot: min = pivot + 1; @@ -4921,9 +4886,6 @@ next_slot: } } - if (mte_is_root(mas->node)) - found = true; -done: mas->offset = offset; return found; } @@ -5027,8 +4989,8 @@ static inline void mas_awalk(struct ma_state *mas, unsigned long size) * There are 4 options: * go to child (descend) * go back to parent (ascend) - * no gap found. (return, slot == MAPLE_NODE_SLOTS) - * found the gap. (return, slot != MAPLE_NODE_SLOTS) + * no gap found. (return, error == -EBUSY) + * found the gap. 
(return) */ while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) { if (last == mas->node) @@ -5113,9 +5075,6 @@ int mas_empty_area(struct ma_state *mas, unsigned long min, return xa_err(mas->node); offset = mas->offset; - if (unlikely(offset == MAPLE_NODE_SLOTS)) - return -EBUSY; - node = mas_mn(mas); mt = mte_node_type(mas->node); pivots = ma_pivots(node, mt); @@ -6938,7 +6897,7 @@ retry: goto unlock; while (mas_is_active(&mas) && (mas.last < max)) { - entry = mas_next_entry(&mas, max); + entry = mas_next_slot(&mas, max, false); if (likely(entry && !xa_is_zero(entry))) break; } @@ -7597,7 +7556,7 @@ void mt_validate(struct maple_tree *mt) MAS_WARN_ON(&mas, mte_dead_node(mas.node)); end = mas_data_end(&mas); if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) && - (mas.max != ULONG_MAX))) { + (!mte_is_root(mas.node)))) { pr_err("Invalid size %u of " PTR_FMT "\n", end, mas_mn(&mas)); } diff --git a/lib/math/Makefile b/lib/math/Makefile index 3ef11305f8d2..853f023ae537 100644 --- a/lib/math/Makefile +++ b/lib/math/Makefile @@ -9,3 +9,4 @@ obj-$(CONFIG_INT_POW_TEST) += tests/int_pow_kunit.o obj-$(CONFIG_TEST_DIV64) += test_div64.o obj-$(CONFIG_TEST_MULDIV64) += test_mul_u64_u64_div_u64.o obj-$(CONFIG_RATIONAL_KUNIT_TEST) += rational-test.o +obj-$(CONFIG_INT_SQRT_KUNIT_TEST) += tests/int_sqrt_kunit.o
\ No newline at end of file diff --git a/lib/math/tests/Makefile b/lib/math/tests/Makefile index 6a169123320a..e1a79f093b2d 100644 --- a/lib/math/tests/Makefile +++ b/lib/math/tests/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_INT_POW_TEST) += int_pow_kunit.o +obj-$(CONFIG_INT_SQRT_KUNIT_TEST) += int_sqrt_kunit.o diff --git a/lib/math/tests/int_sqrt_kunit.c b/lib/math/tests/int_sqrt_kunit.c new file mode 100644 index 000000000000..1798e1312eb7 --- /dev/null +++ b/lib/math/tests/int_sqrt_kunit.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <kunit/test.h> +#include <linux/limits.h> +#include <linux/math.h> +#include <linux/module.h> +#include <linux/string.h> + +struct test_case_params { + unsigned long x; + unsigned long expected_result; + const char *name; +}; + +static const struct test_case_params params[] = { + { 0, 0, "edge case: square root of 0" }, + { 1, 1, "perfect square: square root of 1" }, + { 2, 1, "non-perfect square: square root of 2" }, + { 3, 1, "non-perfect square: square root of 3" }, + { 4, 2, "perfect square: square root of 4" }, + { 5, 2, "non-perfect square: square root of 5" }, + { 6, 2, "non-perfect square: square root of 6" }, + { 7, 2, "non-perfect square: square root of 7" }, + { 8, 2, "non-perfect square: square root of 8" }, + { 9, 3, "perfect square: square root of 9" }, + { 15, 3, "non-perfect square: square root of 15 (N-1 from 16)" }, + { 16, 4, "perfect square: square root of 16" }, + { 17, 4, "non-perfect square: square root of 17 (N+1 from 16)" }, + { 80, 8, "non-perfect square: square root of 80 (N-1 from 81)" }, + { 81, 9, "perfect square: square root of 81" }, + { 82, 9, "non-perfect square: square root of 82 (N+1 from 81)" }, + { 255, 15, "non-perfect square: square root of 255 (N-1 from 256)" }, + { 256, 16, "perfect square: square root of 256" }, + { 257, 16, "non-perfect square: square root of 257 (N+1 from 256)" }, + { 2147483648, 46340, "large input: square root of 2147483648" }, + { 4294967295, 65535, "edge case: ULONG_MAX for 32-bit" }, +}; + +static void get_desc(const struct test_case_params *tc, char *desc) +{ + strscpy(desc, tc->name, KUNIT_PARAM_DESC_SIZE); +} + +KUNIT_ARRAY_PARAM(int_sqrt, params, get_desc); + +static void int_sqrt_test(struct kunit *test) +{ + const struct test_case_params *tc = (const struct test_case_params *)test->param_value; + + KUNIT_EXPECT_EQ(test, tc->expected_result, int_sqrt(tc->x)); +} + +static struct kunit_case math_int_sqrt_test_cases[] = { + KUNIT_CASE_PARAM(int_sqrt_test, int_sqrt_gen_params), + {} +}; + +static struct kunit_suite int_sqrt_test_suite = { + .name = "math-int_sqrt", + .test_cases = math_int_sqrt_test_cases, +}; + +kunit_test_suites(&int_sqrt_test_suite); + +MODULE_DESCRIPTION("math.int_sqrt KUnit test suite"); +MODULE_LICENSE("GPL"); diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 6c902639728b..3e555d012ed6 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -584,10 +584,6 @@ static struct bucket_table *rhashtable_insert_one( */ rht_assign_locked(bkt, obj); - atomic_inc(&ht->nelems); - if (rht_grow_above_75(ht, tbl)) - schedule_work(&ht->run_work); - return NULL; } @@ -615,15 +611,23 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key, new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); data = ERR_PTR(-EAGAIN); } else { + bool inserted; + flags = rht_lock(tbl, bkt); data = rhashtable_lookup_one(ht, bkt, tbl, hash, key, obj); new_tbl = rhashtable_insert_one(ht, bkt, tbl, hash, obj, data); + 
inserted = data && !new_tbl; + if (inserted) + atomic_inc(&ht->nelems); if (PTR_ERR(new_tbl) != -EEXIST) data = ERR_CAST(new_tbl); rht_unlock(tbl, bkt, flags); + + if (inserted && rht_grow_above_75(ht, tbl)) + schedule_work(&ht->run_work); } } while (!IS_ERR_OR_NULL(new_tbl)); @@ -665,7 +669,7 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow); * structure outside the hash table. * * This function may be called from any process context, including - * non-preemptable context, but cannot be called from softirq or + * non-preemptible context, but cannot be called from softirq or * hardirq context. * * You must call rhashtable_walk_exit after this function returns. diff --git a/lib/sort.c b/lib/sort.c index 048b7a6ef967..8e73dc55476b 100644 --- a/lib/sort.c +++ b/lib/sort.c @@ -200,6 +200,13 @@ static size_t parent(size_t i, unsigned int lsbit, size_t size) * copy (e.g. fix up pointers or auxiliary data), but the built-in swap * avoids a slow retpoline and so is significantly faster. * + * The comparison function must adhere to specific mathematical + * properties to ensure correct and stable sorting: + * - Antisymmetry: cmp_func(a, b) must return the opposite sign of + * cmp_func(b, a). + * - Transitivity: if cmp_func(a, b) <= 0 and cmp_func(b, c) <= 0, then + * cmp_func(a, c) <= 0. + * * Sorting time is O(n log n) both on average and worst-case. While * quicksort is slightly faster on average, it suffers from exploitable * O(n*n) worst-case behavior and extra memory requirements that make diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c index 704cb1093ae8..13e2a10d7554 100644 --- a/lib/test_maple_tree.c +++ b/lib/test_maple_tree.c @@ -1563,6 +1563,30 @@ static noinline void __init check_root_expand(struct maple_tree *mt) mas_unlock(&mas); } +static noinline void __init check_deficient_node(struct maple_tree *mt) +{ + MA_STATE(mas, mt, 0, 0); + int count; + + mas_lock(&mas); + for (count = 0; count < 10; count++) { + mas_set(&mas, count); + mas_store_gfp(&mas, xa_mk_value(count), GFP_KERNEL); + } + + for (count = 20; count < 39; count++) { + mas_set(&mas, count); + mas_store_gfp(&mas, xa_mk_value(count), GFP_KERNEL); + } + + for (count = 10; count < 12; count++) { + mas_set(&mas, count); + mas_store_gfp(&mas, xa_mk_value(count), GFP_KERNEL); + } + mas_unlock(&mas); + mt_validate(mt); +} + static noinline void __init check_gap_combining(struct maple_tree *mt) { struct maple_enode *mn1, *mn2; @@ -3714,6 +3738,34 @@ static noinline void __init alloc_cyclic_testing(struct maple_tree *mt) } mtree_destroy(mt); + + /* + * Issue with reverse search was discovered + * https://lore.kernel.org/all/20241216060600.287B4C4CED0@smtp.kernel.org/ + * Exhausting the allocation area and forcing the search to wrap needs a + * mas_reset() in mas_alloc_cyclic(). 
+ */ + next = 0; + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + for (int i = 0; i < 1023; i++) { + mtree_alloc_cyclic(mt, &location, mt, 2, 1024, &next, GFP_KERNEL); + MT_BUG_ON(mt, i != location - 2); + MT_BUG_ON(mt, i != next - 3); + MT_BUG_ON(mt, mtree_load(mt, location) != mt); + } + mtree_erase(mt, 123); + MT_BUG_ON(mt, mtree_load(mt, 123) != NULL); + mtree_alloc_cyclic(mt, &location, mt, 2, 1024, &next, GFP_KERNEL); + MT_BUG_ON(mt, 123 != location); + MT_BUG_ON(mt, 124 != next); + MT_BUG_ON(mt, mtree_load(mt, location) != mt); + mtree_erase(mt, 100); + mtree_alloc_cyclic(mt, &location, mt, 2, 1024, &next, GFP_KERNEL); + MT_BUG_ON(mt, 100 != location); + MT_BUG_ON(mt, 101 != next); + MT_BUG_ON(mt, mtree_load(mt, location) != mt); + mtree_destroy(mt); + /* Overflow test */ next = ULONG_MAX - 1; ret = mtree_alloc_cyclic(mt, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL); @@ -3797,6 +3849,10 @@ static int __init maple_tree_seed(void) #endif mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + check_deficient_node(&tree); + mtree_destroy(&tree); + + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); check_store_null(&tree); mtree_destroy(&tree); diff --git a/lib/test_min_heap.c b/lib/test_min_heap.c index e6fbb798558b..a9c4a74d3898 100644 --- a/lib/test_min_heap.c +++ b/lib/test_min_heap.c @@ -32,7 +32,7 @@ static __init int pop_verify_heap(bool min_heap, int last; last = values[0]; - min_heap_pop(heap, funcs, NULL); + min_heap_pop_inline(heap, funcs, NULL); while (heap->nr > 0) { if (min_heap) { if (last > values[0]) { @@ -48,7 +48,7 @@ static __init int pop_verify_heap(bool min_heap, } } last = values[0]; - min_heap_pop(heap, funcs, NULL); + min_heap_pop_inline(heap, funcs, NULL); } return err; } @@ -69,7 +69,7 @@ static __init int test_heapify_all(bool min_heap) int i, err; /* Test with known set of values. */ - min_heapify_all(&heap, &funcs, NULL); + min_heapify_all_inline(&heap, &funcs, NULL); err = pop_verify_heap(min_heap, &heap, &funcs); @@ -78,7 +78,7 @@ static __init int test_heapify_all(bool min_heap) for (i = 0; i < heap.nr; i++) values[i] = get_random_u32(); - min_heapify_all(&heap, &funcs, NULL); + min_heapify_all_inline(&heap, &funcs, NULL); err += pop_verify_heap(min_heap, &heap, &funcs); return err; @@ -102,14 +102,14 @@ static __init int test_heap_push(bool min_heap) /* Test with known set of values copied from data. */ for (i = 0; i < ARRAY_SIZE(data); i++) - min_heap_push(&heap, &data[i], &funcs, NULL); + min_heap_push_inline(&heap, &data[i], &funcs, NULL); err = pop_verify_heap(min_heap, &heap, &funcs); /* Test with randomly generated values. */ while (heap.nr < heap.size) { temp = get_random_u32(); - min_heap_push(&heap, &temp, &funcs, NULL); + min_heap_push_inline(&heap, &temp, &funcs, NULL); } err += pop_verify_heap(min_heap, &heap, &funcs); @@ -135,22 +135,22 @@ static __init int test_heap_pop_push(bool min_heap) /* Fill values with data to pop and replace. */ temp = min_heap ? 0x80000000 : 0x7FFFFFFF; for (i = 0; i < ARRAY_SIZE(data); i++) - min_heap_push(&heap, &temp, &funcs, NULL); + min_heap_push_inline(&heap, &temp, &funcs, NULL); /* Test with known set of values copied from data. */ for (i = 0; i < ARRAY_SIZE(data); i++) - min_heap_pop_push(&heap, &data[i], &funcs, NULL); + min_heap_pop_push_inline(&heap, &data[i], &funcs, NULL); err = pop_verify_heap(min_heap, &heap, &funcs); heap.nr = 0; for (i = 0; i < ARRAY_SIZE(data); i++) - min_heap_push(&heap, &temp, &funcs, NULL); + min_heap_push_inline(&heap, &temp, &funcs, NULL); /* Test with randomly generated values. 
*/ for (i = 0; i < ARRAY_SIZE(data); i++) { temp = get_random_u32(); - min_heap_pop_push(&heap, &temp, &funcs, NULL); + min_heap_pop_push_inline(&heap, &temp, &funcs, NULL); } err += pop_verify_heap(min_heap, &heap, &funcs); @@ -163,7 +163,7 @@ static __init int test_heap_del(bool min_heap) -3, -1, -2, -4, 0x8000000, 0x7FFFFFF }; struct min_heap_test heap; - min_heap_init(&heap, values, ARRAY_SIZE(values)); + min_heap_init_inline(&heap, values, ARRAY_SIZE(values)); heap.nr = ARRAY_SIZE(values); struct min_heap_callbacks funcs = { .less = min_heap ? less_than : greater_than, @@ -172,9 +172,9 @@ static __init int test_heap_del(bool min_heap) int i, err; /* Test with known set of values. */ - min_heapify_all(&heap, &funcs, NULL); + min_heapify_all_inline(&heap, &funcs, NULL); for (i = 0; i < ARRAY_SIZE(values) / 2; i++) - min_heap_del(&heap, get_random_u32() % heap.nr, &funcs, NULL); + min_heap_del_inline(&heap, get_random_u32() % heap.nr, &funcs, NULL); err = pop_verify_heap(min_heap, &heap, &funcs); @@ -182,10 +182,10 @@ static __init int test_heap_del(bool min_heap) heap.nr = ARRAY_SIZE(values); for (i = 0; i < heap.nr; i++) values[i] = get_random_u32(); - min_heapify_all(&heap, &funcs, NULL); + min_heapify_all_inline(&heap, &funcs, NULL); for (i = 0; i < ARRAY_SIZE(values) / 2; i++) - min_heap_del(&heap, get_random_u32() % heap.nr, &funcs, NULL); + min_heap_del_inline(&heap, get_random_u32() % heap.nr, &funcs, NULL); err += pop_verify_heap(min_heap, &heap, &funcs); return err; diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c index b6696fa1d426..4249e0cc8aaf 100644 --- a/lib/test_sysctl.c +++ b/lib/test_sysctl.c @@ -71,7 +71,7 @@ static struct test_sysctl_data test_data = { }; /* These are all under /proc/sys/debug/test_sysctl/ */ -static struct ctl_table test_table[] = { +static const struct ctl_table test_table[] = { { .procname = "int_0001", .data = &test_data.int_0001, @@ -177,7 +177,7 @@ static int test_sysctl_setup_node_tests(void) } /* Used to test that unregister actually removes the directory */ -static struct ctl_table test_table_unregister[] = { +static const struct ctl_table test_table_unregister[] = { { .procname = "unregister_error", .data = &test_data.int_0001, @@ -220,7 +220,7 @@ static int test_sysctl_run_register_mount_point(void) return 0; } -static struct ctl_table test_table_empty[] = { }; +static const struct ctl_table test_table_empty[] = { }; static int test_sysctl_run_register_empty(void) { diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c index 4ddf769861ff..f585949ff696 100644 --- a/lib/test_vmalloc.c +++ b/lib/test_vmalloc.c @@ -373,7 +373,7 @@ vm_map_ram_test(void) if (!pages) return -1; - nr_allocated = alloc_pages_bulk_array(GFP_KERNEL, map_nr_pages, pages); + nr_allocated = alloc_pages_bulk(GFP_KERNEL, map_nr_pages, pages); if (nr_allocated != map_nr_pages) goto cleanup; diff --git a/lib/test_xarray.c b/lib/test_xarray.c index d5c5cbba33ed..eab5971d0a48 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -6,11 +6,10 @@ * Author: Matthew Wilcox <willy@infradead.org> */ -#include <linux/xarray.h> -#include <linux/module.h> +#include <kunit/test.h> -static unsigned int tests_run; -static unsigned int tests_passed; +#include <linux/module.h> +#include <linux/xarray.h> static const unsigned int order_limit = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
BITS_PER_LONG : 1; @@ -20,15 +19,12 @@ static const unsigned int order_limit = void xa_dump(const struct xarray *xa) { } # endif #undef XA_BUG_ON -#define XA_BUG_ON(xa, x) do { \ - tests_run++; \ - if (x) { \ - printk("BUG at %s:%d\n", __func__, __LINE__); \ - xa_dump(xa); \ - dump_stack(); \ - } else { \ - tests_passed++; \ - } \ +#define XA_BUG_ON(xa, x) do { \ + if (x) { \ + KUNIT_FAIL(test, #x); \ + xa_dump(xa); \ + dump_stack(); \ + } \ } while (0) #endif @@ -42,13 +38,13 @@ static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp) return xa_store(xa, index, xa_mk_index(index), gfp); } -static void xa_insert_index(struct xarray *xa, unsigned long index) +static void xa_insert_index(struct kunit *test, struct xarray *xa, unsigned long index) { XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index), GFP_KERNEL) != 0); } -static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp) +static void xa_alloc_index(struct kunit *test, struct xarray *xa, unsigned long index, gfp_t gfp) { u32 id; @@ -57,7 +53,7 @@ static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp) XA_BUG_ON(xa, id != index); } -static void xa_erase_index(struct xarray *xa, unsigned long index) +static void xa_erase_index(struct kunit *test, struct xarray *xa, unsigned long index) { XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index)); XA_BUG_ON(xa, xa_load(xa, index) != NULL); @@ -83,8 +79,15 @@ static void *xa_store_order(struct xarray *xa, unsigned long index, return curr; } -static noinline void check_xa_err(struct xarray *xa) +static inline struct xarray *xa_param(struct kunit *test) { + return *(struct xarray **)test->param_value; +} + +static noinline void check_xa_err(struct kunit *test) +{ + struct xarray *xa = xa_param(test); + XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0); XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0); #ifndef __KERNEL__ @@ -99,8 +102,10 @@ static noinline void check_xa_err(struct xarray *xa) // XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL); } -static noinline void check_xas_retry(struct xarray *xa) +static noinline void check_xas_retry(struct kunit *test) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0); void *entry; @@ -109,7 +114,7 @@ static noinline void check_xas_retry(struct xarray *xa) rcu_read_lock(); XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0)); - xa_erase_index(xa, 1); + xa_erase_index(test, xa, 1); XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas))); XA_BUG_ON(xa, xas_retry(&xas, NULL)); XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0))); @@ -140,12 +145,14 @@ static noinline void check_xas_retry(struct xarray *xa) } xas_unlock(&xas); - xa_erase_index(xa, 0); - xa_erase_index(xa, 1); + xa_erase_index(test, xa, 0); + xa_erase_index(test, xa, 1); } -static noinline void check_xa_load(struct xarray *xa) +static noinline void check_xa_load(struct kunit *test) { + struct xarray *xa = xa_param(test); + unsigned long i, j; for (i = 0; i < 1024; i++) { @@ -167,13 +174,15 @@ static noinline void check_xa_load(struct xarray *xa) else XA_BUG_ON(xa, entry); } - xa_erase_index(xa, i); + xa_erase_index(test, xa, i); } XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index) +static noinline void check_xa_mark_1(struct kunit *test, unsigned long index) { + struct xarray *xa = xa_param(test); + unsigned int order; unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
8 : 1; @@ -193,7 +202,7 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index) XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1)); /* Storing NULL clears marks, and they can't be set again */ - xa_erase_index(xa, index); + xa_erase_index(test, xa, index); XA_BUG_ON(xa, !xa_empty(xa)); XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); xa_set_mark(xa, index, XA_MARK_0); @@ -244,15 +253,17 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index) XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0)); XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1)); XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2)); - xa_erase_index(xa, index); - xa_erase_index(xa, next); + xa_erase_index(test, xa, index); + xa_erase_index(test, xa, next); XA_BUG_ON(xa, !xa_empty(xa)); } XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_xa_mark_2(struct xarray *xa) +static noinline void check_xa_mark_2(struct kunit *test) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0); unsigned long index; unsigned int count = 0; @@ -289,9 +300,11 @@ static noinline void check_xa_mark_2(struct xarray *xa) xa_destroy(xa); } -static noinline void check_xa_mark_3(struct xarray *xa) +static noinline void check_xa_mark_3(struct kunit *test) { #ifdef CONFIG_XARRAY_MULTI + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0x41); void *entry; int count = 0; @@ -310,19 +323,21 @@ static noinline void check_xa_mark_3(struct xarray *xa) #endif } -static noinline void check_xa_mark(struct xarray *xa) +static noinline void check_xa_mark(struct kunit *test) { unsigned long index; for (index = 0; index < 16384; index += 4) - check_xa_mark_1(xa, index); + check_xa_mark_1(test, index); - check_xa_mark_2(xa); - check_xa_mark_3(xa); + check_xa_mark_2(test); + check_xa_mark_3(test); } -static noinline void check_xa_shrink(struct xarray *xa) +static noinline void check_xa_shrink(struct kunit *test) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 1); struct xa_node *node; unsigned int order; @@ -347,7 +362,7 @@ static noinline void check_xa_shrink(struct xarray *xa) XA_BUG_ON(xa, xas_load(&xas) != NULL); xas_unlock(&xas); XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0)); - xa_erase_index(xa, 0); + xa_erase_index(test, xa, 0); XA_BUG_ON(xa, !xa_empty(xa)); for (order = 0; order < max_order; order++) { @@ -364,45 +379,49 @@ static noinline void check_xa_shrink(struct xarray *xa) XA_BUG_ON(xa, xa_head(xa) == node); rcu_read_unlock(); XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL); - xa_erase_index(xa, ULONG_MAX); + xa_erase_index(test, xa, ULONG_MAX); XA_BUG_ON(xa, xa->xa_head != node); - xa_erase_index(xa, 0); + xa_erase_index(test, xa, 0); } } -static noinline void check_insert(struct xarray *xa) +static noinline void check_insert(struct kunit *test) { + struct xarray *xa = xa_param(test); + unsigned long i; for (i = 0; i < 1024; i++) { - xa_insert_index(xa, i); + xa_insert_index(test, xa, i); XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL); XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL); - xa_erase_index(xa, i); + xa_erase_index(test, xa, i); } for (i = 10; i < BITS_PER_LONG; i++) { - xa_insert_index(xa, 1UL << i); + xa_insert_index(test, xa, 1UL << i); XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL); XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL); - xa_erase_index(xa, 1UL << i); + xa_erase_index(test, xa, 1UL << i); - xa_insert_index(xa, (1UL << i) - 1); + xa_insert_index(test, xa, (1UL << i) - 1); XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL); XA_BUG_ON(xa, xa_load(xa, 1UL << i) != 
NULL); - xa_erase_index(xa, (1UL << i) - 1); + xa_erase_index(test, xa, (1UL << i) - 1); } - xa_insert_index(xa, ~0UL); + xa_insert_index(test, xa, ~0UL); XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL); XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL); - xa_erase_index(xa, ~0UL); + xa_erase_index(test, xa, ~0UL); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_cmpxchg(struct xarray *xa) +static noinline void check_cmpxchg(struct kunit *test) { + struct xarray *xa = xa_param(test); + void *FIVE = xa_mk_value(5); void *SIX = xa_mk_value(6); void *LOTS = xa_mk_value(12345678); @@ -418,14 +437,16 @@ static noinline void check_cmpxchg(struct xarray *xa) XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY); XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE); XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY); - xa_erase_index(xa, 12345678); - xa_erase_index(xa, 5); + xa_erase_index(test, xa, 12345678); + xa_erase_index(test, xa, 5); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_cmpxchg_order(struct xarray *xa) +static noinline void check_cmpxchg_order(struct kunit *test) { #ifdef CONFIG_XARRAY_MULTI + struct xarray *xa = xa_param(test); + void *FIVE = xa_mk_value(5); unsigned int i, order = 3; @@ -476,8 +497,10 @@ static noinline void check_cmpxchg_order(struct xarray *xa) #endif } -static noinline void check_reserve(struct xarray *xa) +static noinline void check_reserve(struct kunit *test) { + struct xarray *xa = xa_param(test); + void *entry; unsigned long index; int count; @@ -494,7 +517,7 @@ static noinline void check_reserve(struct xarray *xa) XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0); XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL); xa_release(xa, 12345678); - xa_erase_index(xa, 12345678); + xa_erase_index(test, xa, 12345678); XA_BUG_ON(xa, !xa_empty(xa)); /* cmpxchg sees a reserved entry as ZERO */ @@ -502,7 +525,7 @@ static noinline void check_reserve(struct xarray *xa) XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY, xa_mk_value(12345678), GFP_NOWAIT) != NULL); xa_release(xa, 12345678); - xa_erase_index(xa, 12345678); + xa_erase_index(test, xa, 12345678); XA_BUG_ON(xa, !xa_empty(xa)); /* xa_insert treats it as busy */ @@ -542,8 +565,10 @@ static noinline void check_reserve(struct xarray *xa) xa_destroy(xa); } -static noinline void check_xas_erase(struct xarray *xa) +static noinline void check_xas_erase(struct kunit *test) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0); void *entry; unsigned long i, j; @@ -581,9 +606,11 @@ static noinline void check_xas_erase(struct xarray *xa) } #ifdef CONFIG_XARRAY_MULTI -static noinline void check_multi_store_1(struct xarray *xa, unsigned long index, +static noinline void check_multi_store_1(struct kunit *test, unsigned long index, unsigned int order) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, index); unsigned long min = index & ~((1UL << order) - 1); unsigned long max = min + (1UL << order); @@ -602,13 +629,15 @@ static noinline void check_multi_store_1(struct xarray *xa, unsigned long index, XA_BUG_ON(xa, xa_load(xa, max) != NULL); XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); - xa_erase_index(xa, min); + xa_erase_index(test, xa, min); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_multi_store_2(struct xarray *xa, unsigned long index, +static noinline void check_multi_store_2(struct kunit *test, unsigned long index, unsigned int order) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, index); xa_store_order(xa, index, 
order, xa_mk_value(0), GFP_KERNEL); @@ -620,9 +649,11 @@ static noinline void check_multi_store_2(struct xarray *xa, unsigned long index, XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_multi_store_3(struct xarray *xa, unsigned long index, +static noinline void check_multi_store_3(struct kunit *test, unsigned long index, unsigned int order) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0); void *entry; int n = 0; @@ -647,9 +678,11 @@ static noinline void check_multi_store_3(struct xarray *xa, unsigned long index, } #endif -static noinline void check_multi_store(struct xarray *xa) +static noinline void check_multi_store(struct kunit *test) { #ifdef CONFIG_XARRAY_MULTI + struct xarray *xa = xa_param(test); + unsigned long i, j, k; unsigned int max_order = (sizeof(long) == 4) ? 30 : 60; @@ -714,26 +747,28 @@ static noinline void check_multi_store(struct xarray *xa) } for (i = 0; i < 20; i++) { - check_multi_store_1(xa, 200, i); - check_multi_store_1(xa, 0, i); - check_multi_store_1(xa, (1UL << i) + 1, i); + check_multi_store_1(test, 200, i); + check_multi_store_1(test, 0, i); + check_multi_store_1(test, (1UL << i) + 1, i); } - check_multi_store_2(xa, 4095, 9); + check_multi_store_2(test, 4095, 9); for (i = 1; i < 20; i++) { - check_multi_store_3(xa, 0, i); - check_multi_store_3(xa, 1UL << i, i); + check_multi_store_3(test, 0, i); + check_multi_store_3(test, 1UL << i, i); } #endif } #ifdef CONFIG_XARRAY_MULTI /* mimics page cache __filemap_add_folio() */ -static noinline void check_xa_multi_store_adv_add(struct xarray *xa, +static noinline void check_xa_multi_store_adv_add(struct kunit *test, unsigned long index, unsigned int order, void *p) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, index); unsigned int nrpages = 1UL << order; @@ -761,10 +796,12 @@ static noinline void check_xa_multi_store_adv_add(struct xarray *xa, } /* mimics page_cache_delete() */ -static noinline void check_xa_multi_store_adv_del_entry(struct xarray *xa, +static noinline void check_xa_multi_store_adv_del_entry(struct kunit *test, unsigned long index, unsigned int order) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, index); xas_set_order(&xas, index, order); @@ -772,12 +809,14 @@ static noinline void check_xa_multi_store_adv_del_entry(struct xarray *xa, xas_init_marks(&xas); } -static noinline void check_xa_multi_store_adv_delete(struct xarray *xa, +static noinline void check_xa_multi_store_adv_delete(struct kunit *test, unsigned long index, unsigned int order) { + struct xarray *xa = xa_param(test); + xa_lock_irq(xa); - check_xa_multi_store_adv_del_entry(xa, index, order); + check_xa_multi_store_adv_del_entry(test, index, order); xa_unlock_irq(xa); } @@ -814,10 +853,12 @@ static unsigned long some_val = 0xdeadbeef; static unsigned long some_val_2 = 0xdeaddead; /* mimics the page cache usage */ -static noinline void check_xa_multi_store_adv(struct xarray *xa, +static noinline void check_xa_multi_store_adv(struct kunit *test, unsigned long pos, unsigned int order) { + struct xarray *xa = xa_param(test); + unsigned int nrpages = 1UL << order; unsigned long index, base, next_index, next_next_index; unsigned int i; @@ -827,7 +868,7 @@ static noinline void check_xa_multi_store_adv(struct xarray *xa, next_index = round_down(base + nrpages, nrpages); next_next_index = round_down(next_index + nrpages, nrpages); - check_xa_multi_store_adv_add(xa, base, order, &some_val); + check_xa_multi_store_adv_add(test, base, order, &some_val); for (i = 0; i < nrpages; i++) XA_BUG_ON(xa, 
test_get_entry(xa, base + i) != &some_val); @@ -835,20 +876,20 @@ static noinline void check_xa_multi_store_adv(struct xarray *xa, XA_BUG_ON(xa, test_get_entry(xa, next_index) != NULL); /* Use order 0 for the next item */ - check_xa_multi_store_adv_add(xa, next_index, 0, &some_val_2); + check_xa_multi_store_adv_add(test, next_index, 0, &some_val_2); XA_BUG_ON(xa, test_get_entry(xa, next_index) != &some_val_2); /* Remove the next item */ - check_xa_multi_store_adv_delete(xa, next_index, 0); + check_xa_multi_store_adv_delete(test, next_index, 0); /* Now use order for a new pointer */ - check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2); + check_xa_multi_store_adv_add(test, next_index, order, &some_val_2); for (i = 0; i < nrpages; i++) XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2); - check_xa_multi_store_adv_delete(xa, next_index, order); - check_xa_multi_store_adv_delete(xa, base, order); + check_xa_multi_store_adv_delete(test, next_index, order); + check_xa_multi_store_adv_delete(test, base, order); XA_BUG_ON(xa, !xa_empty(xa)); /* starting fresh again */ @@ -856,7 +897,7 @@ static noinline void check_xa_multi_store_adv(struct xarray *xa, /* let's test some holes now */ /* hole at base and next_next */ - check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2); + check_xa_multi_store_adv_add(test, next_index, order, &some_val_2); for (i = 0; i < nrpages; i++) XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL); @@ -867,12 +908,12 @@ static noinline void check_xa_multi_store_adv(struct xarray *xa, for (i = 0; i < nrpages; i++) XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != NULL); - check_xa_multi_store_adv_delete(xa, next_index, order); + check_xa_multi_store_adv_delete(test, next_index, order); XA_BUG_ON(xa, !xa_empty(xa)); /* hole at base and next */ - check_xa_multi_store_adv_add(xa, next_next_index, order, &some_val_2); + check_xa_multi_store_adv_add(test, next_next_index, order, &some_val_2); for (i = 0; i < nrpages; i++) XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL); @@ -883,12 +924,12 @@ static noinline void check_xa_multi_store_adv(struct xarray *xa, for (i = 0; i < nrpages; i++) XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != &some_val_2); - check_xa_multi_store_adv_delete(xa, next_next_index, order); + check_xa_multi_store_adv_delete(test, next_next_index, order); XA_BUG_ON(xa, !xa_empty(xa)); } #endif -static noinline void check_multi_store_advanced(struct xarray *xa) +static noinline void check_multi_store_advanced(struct kunit *test) { #ifdef CONFIG_XARRAY_MULTI unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
@@ -900,59 +941,59 @@ static noinline void check_multi_store_advanced(struct xarray *xa)
	 */
	for (pos = 7; pos < end; pos = (pos * pos) + 564) {
		for (i = 0; i < max_order; i++) {
-			check_xa_multi_store_adv(xa, pos, i);
-			check_xa_multi_store_adv(xa, pos + 157, i);
+			check_xa_multi_store_adv(test, pos, i);
+			check_xa_multi_store_adv(test, pos + 157, i);
		}
	}
#endif
}

-static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
+static noinline void check_xa_alloc_1(struct kunit *test, struct xarray *xa, unsigned int base)
{
	int i;
	u32 id;

	XA_BUG_ON(xa, !xa_empty(xa));
	/* An empty array should assign %base to the first alloc */
-	xa_alloc_index(xa, base, GFP_KERNEL);
+	xa_alloc_index(test, xa, base, GFP_KERNEL);

	/* Erasing it should make the array empty again */
-	xa_erase_index(xa, base);
+	xa_erase_index(test, xa, base);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* And it should assign %base again */
-	xa_alloc_index(xa, base, GFP_KERNEL);
+	xa_alloc_index(test, xa, base, GFP_KERNEL);

	/* Allocating and then erasing a lot should not lose base */
	for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++)
-		xa_alloc_index(xa, i, GFP_KERNEL);
+		xa_alloc_index(test, xa, i, GFP_KERNEL);
	for (i = base; i < 2 * XA_CHUNK_SIZE; i++)
-		xa_erase_index(xa, i);
-	xa_alloc_index(xa, base, GFP_KERNEL);
+		xa_erase_index(test, xa, i);
+	xa_alloc_index(test, xa, base, GFP_KERNEL);

	/* Destroying the array should do the same as erasing */
	xa_destroy(xa);

	/* And it should assign %base again */
-	xa_alloc_index(xa, base, GFP_KERNEL);
+	xa_alloc_index(test, xa, base, GFP_KERNEL);

	/* The next assigned ID should be base+1 */
-	xa_alloc_index(xa, base + 1, GFP_KERNEL);
-	xa_erase_index(xa, base + 1);
+	xa_alloc_index(test, xa, base + 1, GFP_KERNEL);
+	xa_erase_index(test, xa, base + 1);

	/* Storing a value should mark it used */
	xa_store_index(xa, base + 1, GFP_KERNEL);
-	xa_alloc_index(xa, base + 2, GFP_KERNEL);
+	xa_alloc_index(test, xa, base + 2, GFP_KERNEL);

	/* If we then erase base, it should be free */
-	xa_erase_index(xa, base);
-	xa_alloc_index(xa, base, GFP_KERNEL);
+	xa_erase_index(test, xa, base);
+	xa_alloc_index(test, xa, base, GFP_KERNEL);

-	xa_erase_index(xa, base + 1);
-	xa_erase_index(xa, base + 2);
+	xa_erase_index(test, xa, base + 1);
+	xa_erase_index(test, xa, base + 2);

	for (i = 1; i < 5000; i++) {
-		xa_alloc_index(xa, base + i, GFP_KERNEL);
+		xa_alloc_index(test, xa, base + i, GFP_KERNEL);
	}

	xa_destroy(xa);
@@ -975,14 +1016,14 @@ static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)

	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
-	XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0);
+	XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
-	xa_erase_index(xa, 3);
+	xa_erase_index(test, xa, 3);
	XA_BUG_ON(xa, !xa_empty(xa));
}

-static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)
+static noinline void check_xa_alloc_2(struct kunit *test, struct xarray *xa, unsigned int base)
{
	unsigned int i, id;
	unsigned long index;
@@ -1018,7 +1059,7 @@ static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)
	XA_BUG_ON(xa, id != 5);

	xa_for_each(xa, index, entry) {
-		xa_erase_index(xa, index);
+		xa_erase_index(test, xa, index);
	}

	for (i = base; i < base + 9; i++) {
@@ -1033,7 +1074,7 @@ static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)

	xa_destroy(xa);
}
-static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
+static noinline void check_xa_alloc_3(struct kunit *test, struct xarray *xa, unsigned int base)
{
	struct xa_limit limit = XA_LIMIT(1, 0x3fff);
	u32 next = 0;
@@ -1049,8 +1090,8 @@ static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0x3ffd);
-	xa_erase_index(xa, 0x3ffd);
-	xa_erase_index(xa, 1);
+	xa_erase_index(test, xa, 0x3ffd);
+	xa_erase_index(test, xa, 1);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0x3ffe; i < 0x4003; i++) {
@@ -1065,8 +1106,8 @@ static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)

	/* Check wrap-around is handled correctly */
	if (base != 0)
-		xa_erase_index(xa, base);
-	xa_erase_index(xa, base + 1);
+		xa_erase_index(test, xa, base);
+	xa_erase_index(test, xa, base + 1);
	next = UINT_MAX;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
@@ -1079,7 +1120,7 @@ static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
	XA_BUG_ON(xa, id != base + 1);

	xa_for_each(xa, index, entry)
-		xa_erase_index(xa, index);
+		xa_erase_index(test, xa, index);

	XA_BUG_ON(xa, !xa_empty(xa));
}
@@ -1087,19 +1128,21 @@ static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
static DEFINE_XARRAY_ALLOC(xa0);
static DEFINE_XARRAY_ALLOC1(xa1);

-static noinline void check_xa_alloc(void)
+static noinline void check_xa_alloc(struct kunit *test)
{
-	check_xa_alloc_1(&xa0, 0);
-	check_xa_alloc_1(&xa1, 1);
-	check_xa_alloc_2(&xa0, 0);
-	check_xa_alloc_2(&xa1, 1);
-	check_xa_alloc_3(&xa0, 0);
-	check_xa_alloc_3(&xa1, 1);
+	check_xa_alloc_1(test, &xa0, 0);
+	check_xa_alloc_1(test, &xa1, 1);
+	check_xa_alloc_2(test, &xa0, 0);
+	check_xa_alloc_2(test, &xa1, 1);
+	check_xa_alloc_3(test, &xa0, 0);
+	check_xa_alloc_3(test, &xa1, 1);
}
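/*
 * Aside (not part of the patch): check_xa_alloc() runs every helper against
 * two arrays because the two DEFINE macros differ in their allocation base,
 * which is exactly the `base` argument threaded through the helpers above.
 * A minimal illustration:
 */
static DEFINE_XARRAY_ALLOC(demo_xa0);	/* ids start at 0 (XA_FLAGS_ALLOC) */
static DEFINE_XARRAY_ALLOC1(demo_xa1);	/* id 0 reserved, ids start at 1 */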
-static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
+static noinline void __check_store_iter(struct kunit *test, unsigned long start,
			unsigned int order, unsigned int present)
{
+	struct xarray *xa = xa_param(test);
+
	XA_STATE_ORDER(xas, xa, start, order);
	void *entry;
	unsigned int count = 0;
@@ -1123,50 +1166,54 @@ retry:
	XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
	XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
			xa_mk_index(start));
-	xa_erase_index(xa, start);
+	xa_erase_index(test, xa, start);
}

-static noinline void check_store_iter(struct xarray *xa)
+static noinline void check_store_iter(struct kunit *test)
{
+	struct xarray *xa = xa_param(test);
+
	unsigned int i, j;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;

	for (i = 0; i < max_order; i++) {
		unsigned int min = 1 << i;
		unsigned int max = (2 << i) - 1;
-		__check_store_iter(xa, 0, i, 0);
+		__check_store_iter(test, 0, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
-		__check_store_iter(xa, min, i, 0);
+		__check_store_iter(test, min, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));

		xa_store_index(xa, min, GFP_KERNEL);
-		__check_store_iter(xa, min, i, 1);
+		__check_store_iter(test, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_index(xa, max, GFP_KERNEL);
-		__check_store_iter(xa, min, i, 1);
+		__check_store_iter(test, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));

		for (j = 0; j < min; j++)
			xa_store_index(xa, j, GFP_KERNEL);
-		__check_store_iter(xa, 0, i, min);
+		__check_store_iter(test, 0, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
		for (j = 0; j < min; j++)
			xa_store_index(xa, min + j, GFP_KERNEL);
-		__check_store_iter(xa, min, i, min);
+		__check_store_iter(test, min, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#ifdef CONFIG_XARRAY_MULTI
	xa_store_index(xa, 63, GFP_KERNEL);
	xa_store_index(xa, 65, GFP_KERNEL);
-	__check_store_iter(xa, 64, 2, 1);
-	xa_erase_index(xa, 63);
+	__check_store_iter(test, 64, 2, 1);
+	xa_erase_index(test, xa, 63);
#endif
	XA_BUG_ON(xa, !xa_empty(xa));
}

-static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
+static noinline void check_multi_find_1(struct kunit *test, unsigned int order)
{
#ifdef CONFIG_XARRAY_MULTI
+	struct xarray *xa = xa_param(test);
+
	unsigned long multi = 3 << order;
	unsigned long next = 4 << order;
	unsigned long index;
@@ -1189,15 +1236,17 @@ static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
	XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
	XA_BUG_ON(xa, index != next);

-	xa_erase_index(xa, multi);
-	xa_erase_index(xa, next);
-	xa_erase_index(xa, next + 1);
+	xa_erase_index(test, xa, multi);
+	xa_erase_index(test, xa, next);
+	xa_erase_index(test, xa, next + 1);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

-static noinline void check_multi_find_2(struct xarray *xa)
+static noinline void check_multi_find_2(struct kunit *test)
{
+	struct xarray *xa = xa_param(test);
+
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
	unsigned int i, j;
	void *entry;
@@ -1211,17 +1260,19 @@ static noinline void check_multi_find_2(struct xarray *xa)
					GFP_KERNEL);
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX) {
-				xa_erase_index(xa, index);
+				xa_erase_index(test, xa, index);
			}
			rcu_read_unlock();
-			xa_erase_index(xa, index - 1);
+			xa_erase_index(test, xa, index - 1);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}
}

-static noinline void check_multi_find_3(struct xarray *xa)
+static noinline void check_multi_find_3(struct kunit *test)
{
+	struct xarray *xa = xa_param(test);
+
	unsigned int order;

	for (order = 5; order < order_limit; order++) {
@@ -1230,12 +1281,14 @@ static noinline void check_multi_find_3(struct xarray *xa)
		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL);
		XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT));
-		xa_erase_index(xa, 0);
+		xa_erase_index(test, xa, 0);
	}
}

-static noinline void check_find_1(struct xarray *xa)
+static noinline void check_find_1(struct kunit *test)
{
+	struct xarray *xa = xa_param(test);
+
	unsigned long i, j, k;

	XA_BUG_ON(xa, !xa_empty(xa));
@@ -1272,18 +1325,20 @@ static noinline void check_find_1(struct xarray *xa)
			else
				XA_BUG_ON(xa, entry != NULL);
		}
-		xa_erase_index(xa, j);
+		xa_erase_index(test, xa, j);
		XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
		XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
	}
-	xa_erase_index(xa, i);
+	xa_erase_index(test, xa, i);
	XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
	XA_BUG_ON(xa, !xa_empty(xa));
}

-static noinline void check_find_2(struct xarray *xa)
+static noinline void check_find_2(struct kunit *test)
{
+	struct xarray *xa = xa_param(test);
+
	void *entry;
	unsigned long i, j, index;
@@ -1303,8 +1358,10 @@ static noinline void check_find_2(struct xarray *xa)
	xa_destroy(xa);
}

-static noinline void check_find_3(struct xarray *xa)
+static noinline void check_find_3(struct kunit *test)
{
+	struct xarray *xa = xa_param(test);
+
	XA_STATE(xas, xa, 0);
	unsigned long i, j, k;
	void *entry;
@@ -1328,8 +1385,10 @@ static noinline void check_find_3(struct xarray *xa)
	xa_destroy(xa);
}

-static noinline void check_find_4(struct xarray *xa)
+static noinline void check_find_4(struct kunit *test)
{
+	struct xarray *xa = xa_param(test);
+
	unsigned long index = 0;
	void *entry;
@@ -1341,22 +1400,22 @@ static noinline void check_find_4(struct xarray *xa)
	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
	XA_BUG_ON(xa, entry);

-	xa_erase_index(xa, ULONG_MAX);
+	xa_erase_index(test, xa, ULONG_MAX);
}

-static noinline void check_find(struct xarray *xa)
+static noinline void check_find(struct kunit *test)
{
	unsigned i;

-	check_find_1(xa);
-	check_find_2(xa);
-	check_find_3(xa);
-	check_find_4(xa);
+	check_find_1(test);
+	check_find_2(test);
+	check_find_3(test);
+	check_find_4(test);

	for (i = 2; i < 10; i++)
-		check_multi_find_1(xa, i);
-	check_multi_find_2(xa);
-	check_multi_find_3(xa);
+		check_multi_find_1(test, i);
+	check_multi_find_2(test);
+	check_multi_find_3(test);
}
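/*
 * For reference (not part of the patch), the plain-API iteration pattern the
 * check_find_*() helpers above exercise: xa_find() locates the first present
 * entry at or after *index, and xa_find_after() the next one strictly after
 * it, updating *index as it goes.
 */
static void demo_walk(struct xarray *xa)
{
	unsigned long index = 0;
	void *entry;

	for (entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT); entry;
	     entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT)) {
		/* index now holds the position of entry */
	}
}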
/* See find_swap_entry() in mm/shmem.c */
@@ -1382,8 +1441,10 @@ static noinline unsigned long xa_find_entry(struct xarray *xa, void *item)
	return entry ? xas.xa_index : -1;
}

-static noinline void check_find_entry(struct xarray *xa)
+static noinline void check_find_entry(struct kunit *test)
{
+	struct xarray *xa = xa_param(test);
+
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;
	unsigned long offset, index;
@@ -1410,12 +1471,14 @@ static noinline void check_find_entry(struct xarray *xa)
	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
-	xa_erase_index(xa, ULONG_MAX);
+	xa_erase_index(test, xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}

-static noinline void check_pause(struct xarray *xa)
+static noinline void check_pause(struct kunit *test)
{
+	struct xarray *xa = xa_param(test);
+
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned int order;
@@ -1448,10 +1511,47 @@ static noinline void check_pause(struct xarray *xa)
	XA_BUG_ON(xa, count != order_limit);

	xa_destroy(xa);
+
+	index = 0;
+	for (order = XA_CHUNK_SHIFT; order > 0; order--) {
+		XA_BUG_ON(xa, xa_store_order(xa, index, order,
+					xa_mk_index(index), GFP_KERNEL));
+		index += 1UL << order;
+	}
+
+	index = 0;
+	count = 0;
+	xas_set(&xas, 0);
+	rcu_read_lock();
+	xas_for_each(&xas, entry, ULONG_MAX) {
+		XA_BUG_ON(xa, entry != xa_mk_index(index));
+		index += 1UL << (XA_CHUNK_SHIFT - count);
+		count++;
+	}
+	rcu_read_unlock();
+	XA_BUG_ON(xa, count != XA_CHUNK_SHIFT);
+
+	index = 0;
+	count = 0;
+	xas_set(&xas, XA_CHUNK_SIZE / 2 + 1);
+	rcu_read_lock();
+	xas_for_each(&xas, entry, ULONG_MAX) {
+		XA_BUG_ON(xa, entry != xa_mk_index(index));
+		index += 1UL << (XA_CHUNK_SHIFT - count);
+		count++;
+		xas_pause(&xas);
+	}
+	rcu_read_unlock();
+	XA_BUG_ON(xa, count != XA_CHUNK_SHIFT);
+
+	xa_destroy(xa);
+
}
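/*
 * The new check_pause() coverage above targets this usage pattern (a sketch,
 * not part of the patch): xas_pause() lets a walker drop the lock
 * mid-iteration and resume later without skipping or revisiting entries.
 * The added multi-order loops verify exactly that property when the paused
 * entry spans several indices.
 */
static void demo_paused_walk(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		/* ... process entry ... */
		xas_pause(&xas);	/* record a safe restart point */
		rcu_read_unlock();
		cond_resched();		/* may sleep or allocate here */
		rcu_read_lock();
	}
	rcu_read_unlock();
}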
-static noinline void check_move_tiny(struct xarray *xa)
+static noinline void check_move_tiny(struct kunit *test)
{
+	struct xarray *xa = xa_param(test);
+
	XA_STATE(xas, xa, 0);

	XA_BUG_ON(xa, !xa_empty(xa));
@@ -1468,12 +1568,14 @@ static noinline void check_move_tiny(struct xarray *xa)
	XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	rcu_read_unlock();
-	xa_erase_index(xa, 0);
+	xa_erase_index(test, xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));
}

-static noinline void check_move_max(struct xarray *xa)
+static noinline void check_move_max(struct kunit *test)
{
+	struct xarray *xa = xa_param(test);
+
	XA_STATE(xas, xa, 0);

	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
@@ -1489,12 +1591,14 @@ static noinline void check_move_max(struct xarray *xa)
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
	rcu_read_unlock();

-	xa_erase_index(xa, ULONG_MAX);
+	xa_erase_index(test, xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}

-static noinline void check_move_small(struct xarray *xa, unsigned long idx)
+static noinline void check_move_small(struct kunit *test, unsigned long idx)
{
+	struct xarray *xa = xa_param(test);
+
	XA_STATE(xas, xa, 0);
	unsigned long i;
@@ -1536,13 +1640,15 @@ static noinline void check_move_small(struct xarray *xa, unsigned long idx)
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	rcu_read_unlock();

-	xa_erase_index(xa, 0);
-	xa_erase_index(xa, idx);
+	xa_erase_index(test, xa, 0);
+	xa_erase_index(test, xa, idx);
	XA_BUG_ON(xa, !xa_empty(xa));
}

-static noinline void check_move(struct xarray *xa)
+static noinline void check_move(struct kunit *test)
{
+	struct xarray *xa = xa_param(test);
+
	XA_STATE(xas, xa, (1 << 16) - 1);
	unsigned long i;
@@ -1569,7 +1675,7 @@ static noinline void check_move(struct xarray *xa)
	rcu_read_unlock();

	for (i = (1 << 8); i < (1 << 15); i++)
-		xa_erase_index(xa, i);
+		xa_erase_index(test, xa, i);

	i = xas.xa_index;
@@ -1600,17 +1706,17 @@ static noinline void check_move(struct xarray *xa)

	xa_destroy(xa);

-	check_move_tiny(xa);
-	check_move_max(xa);
+	check_move_tiny(test);
+	check_move_max(test);

	for (i = 0; i < 16; i++)
-		check_move_small(xa, 1UL << i);
+		check_move_small(test, 1UL << i);

	for (i = 2; i < 16; i++)
-		check_move_small(xa, (1UL << i) - 1);
+		check_move_small(test, (1UL << i) - 1);
}

-static noinline void xa_store_many_order(struct xarray *xa,
+static noinline void xa_store_many_order(struct kunit *test, struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
@@ -1633,30 +1739,34 @@ unlock:
	XA_BUG_ON(xa, xas_error(&xas));
}

-static noinline void check_create_range_1(struct xarray *xa,
+static noinline void check_create_range_1(struct kunit *test,
		unsigned long index, unsigned order)
{
+	struct xarray *xa = xa_param(test);
+
	unsigned long i;

-	xa_store_many_order(xa, index, order);
+	xa_store_many_order(test, xa, index, order);
	for (i = index; i < index + (1UL << order); i++)
-		xa_erase_index(xa, i);
+		xa_erase_index(test, xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

-static noinline void check_create_range_2(struct xarray *xa, unsigned order)
+static noinline void check_create_range_2(struct kunit *test, unsigned int order)
{
+	struct xarray *xa = xa_param(test);
+
	unsigned long i;
	unsigned long nr = 1UL << order;

	for (i = 0; i < nr * nr; i += nr)
-		xa_store_many_order(xa, i, order);
+		xa_store_many_order(test, xa, i, order);
	for (i = 0; i < nr * nr; i++)
-		xa_erase_index(xa, i);
+		xa_erase_index(test, xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

-static noinline void check_create_range_3(void)
+static noinline void check_create_range_3(struct kunit *test)
{
	XA_STATE(xas, NULL, 0);
	xas_set_err(&xas, -EEXIST);
@@ -1664,9 +1774,11 @@ static noinline void check_create_range_3(void)
	XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST);
}

-static noinline void check_create_range_4(struct xarray *xa,
+static noinline void check_create_range_4(struct kunit *test,
		unsigned long index, unsigned order)
{
+	struct xarray *xa = xa_param(test);
+
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned long base = xas.xa_index;
	unsigned long i = 0;
@@ -1692,13 +1804,15 @@ unlock:
	XA_BUG_ON(xa, xas_error(&xas));

	for (i = base; i < base + (1UL << order); i++)
-		xa_erase_index(xa, i);
+		xa_erase_index(test, xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

-static noinline void check_create_range_5(struct xarray *xa,
+static noinline void check_create_range_5(struct kunit *test,
		unsigned long index, unsigned int order)
{
+	struct xarray *xa = xa_param(test);
+
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i;
@@ -1715,44 +1829,46 @@ static noinline void check_create_range_5(struct xarray *xa,
	xa_destroy(xa);
}

-static noinline void check_create_range(struct xarray *xa)
+static noinline void check_create_range(struct kunit *test)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1;

	for (order = 0; order < max_order; order++) {
-		check_create_range_1(xa, 0, order);
-		check_create_range_1(xa, 1U << order, order);
-		check_create_range_1(xa, 2U << order, order);
-		check_create_range_1(xa, 3U << order, order);
-		check_create_range_1(xa, 1U << 24, order);
+		check_create_range_1(test, 0, order);
+		check_create_range_1(test, 1U << order, order);
+		check_create_range_1(test, 2U << order, order);
+		check_create_range_1(test, 3U << order, order);
+		check_create_range_1(test, 1U << 24, order);
		if (order < 10)
-			check_create_range_2(xa, order);
-
-		check_create_range_4(xa, 0, order);
-		check_create_range_4(xa, 1U << order, order);
-		check_create_range_4(xa, 2U << order, order);
-		check_create_range_4(xa, 3U << order, order);
-		check_create_range_4(xa, 1U << 24, order);
-
-		check_create_range_4(xa, 1, order);
-		check_create_range_4(xa, (1U << order) + 1, order);
-		check_create_range_4(xa, (2U << order) + 1, order);
-		check_create_range_4(xa, (2U << order) - 1, order);
-		check_create_range_4(xa, (3U << order) + 1, order);
-		check_create_range_4(xa, (3U << order) - 1, order);
-		check_create_range_4(xa, (1U << 24) + 1, order);
-
-		check_create_range_5(xa, 0, order);
-		check_create_range_5(xa, (1U << order), order);
+			check_create_range_2(test, order);
+
+		check_create_range_4(test, 0, order);
+		check_create_range_4(test, 1U << order, order);
+		check_create_range_4(test, 2U << order, order);
+		check_create_range_4(test, 3U << order, order);
+		check_create_range_4(test, 1U << 24, order);
+
+		check_create_range_4(test, 1, order);
+		check_create_range_4(test, (1U << order) + 1, order);
+		check_create_range_4(test, (2U << order) + 1, order);
+		check_create_range_4(test, (2U << order) - 1, order);
+		check_create_range_4(test, (3U << order) + 1, order);
+		check_create_range_4(test, (3U << order) - 1, order);
+		check_create_range_4(test, (1U << 24) + 1, order);
+
+		check_create_range_5(test, 0, order);
+		check_create_range_5(test, (1U << order), order);
	}

-	check_create_range_3();
+	check_create_range_3(test);
}

-static noinline void __check_store_range(struct xarray *xa, unsigned long first,
+static noinline void __check_store_range(struct kunit *test, unsigned long first,
		unsigned long last)
{
+	struct xarray *xa = xa_param(test);
+
#ifdef CONFIG_XARRAY_MULTI
	xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);
@@ -1767,26 +1883,28 @@ static noinline void __check_store_range(struct xarray *xa, unsigned long first,
	XA_BUG_ON(xa, !xa_empty(xa));
}

-static noinline void check_store_range(struct xarray *xa)
+static noinline void check_store_range(struct kunit *test)
{
	unsigned long i, j;

	for (i = 0; i < 128; i++) {
		for (j = i; j < 128; j++) {
-			__check_store_range(xa, i, j);
-			__check_store_range(xa, 128 + i, 128 + j);
-			__check_store_range(xa, 4095 + i, 4095 + j);
-			__check_store_range(xa, 4096 + i, 4096 + j);
-			__check_store_range(xa, 123456 + i, 123456 + j);
-			__check_store_range(xa, (1 << 24) + i, (1 << 24) + j);
+			__check_store_range(test, i, j);
+			__check_store_range(test, 128 + i, 128 + j);
+			__check_store_range(test, 4095 + i, 4095 + j);
+			__check_store_range(test, 4096 + i, 4096 + j);
+			__check_store_range(test, 123456 + i, 123456 + j);
+			__check_store_range(test, (1 << 24) + i, (1 << 24) + j);
		}
	}
}
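/*
 * Reminder of the API under test here (a sketch, not part of the patch):
 * xa_store_range() stores a single entry over an inclusive index range, so a
 * load of any index inside the range returns that same entry. Requires
 * CONFIG_XARRAY_MULTI, as the #ifdef above reflects.
 */
static void demo_store_range(struct xarray *xa)
{
	xa_store_range(xa, 4, 7, xa_mk_value(4), GFP_KERNEL);	/* indices 4..7 */
	WARN_ON(xa_load(xa, 6) != xa_mk_value(4));		/* same entry */
	xa_store_range(xa, 4, 7, NULL, GFP_KERNEL);		/* erase the range */
}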
#ifdef CONFIG_XARRAY_MULTI
-static void check_split_1(struct xarray *xa, unsigned long index,
+static void check_split_1(struct kunit *test, unsigned long index,
				unsigned int order, unsigned int new_order)
{
+	struct xarray *xa = xa_param(test);
+
	XA_STATE_ORDER(xas, xa, index, new_order);
	unsigned int i, found;
	void *entry;
@@ -1822,26 +1940,30 @@ static void check_split_1(struct xarray *xa, unsigned long index,
	xa_destroy(xa);
}

-static noinline void check_split(struct xarray *xa)
+static noinline void check_split(struct kunit *test)
{
+	struct xarray *xa = xa_param(test);
+
	unsigned int order, new_order;

	XA_BUG_ON(xa, !xa_empty(xa));

	for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
		for (new_order = 0; new_order < order; new_order++) {
-			check_split_1(xa, 0, order, new_order);
-			check_split_1(xa, 1UL << order, order, new_order);
-			check_split_1(xa, 3UL << order, order, new_order);
+			check_split_1(test, 0, order, new_order);
+			check_split_1(test, 1UL << order, order, new_order);
+			check_split_1(test, 3UL << order, order, new_order);
		}
	}
}
#else
-static void check_split(struct xarray *xa) { }
+static void check_split(struct kunit *test) { }
#endif

-static void check_align_1(struct xarray *xa, char *name)
+static void check_align_1(struct kunit *test, char *name)
{
+	struct xarray *xa = xa_param(test);
+
	int i;
	unsigned int id;
	unsigned long index;
@@ -1861,8 +1983,10 @@ static void check_align_1(struct xarray *xa, char *name)
 * We should always be able to store without allocating memory after
 * reserving a slot.
 */
-static void check_align_2(struct xarray *xa, char *name)
+static void check_align_2(struct kunit *test, char *name)
{
+	struct xarray *xa = xa_param(test);
+
	int i;

	XA_BUG_ON(xa, !xa_empty(xa));
@@ -1881,15 +2005,15 @@ static void check_align_2(struct xarray *xa, char *name)
	XA_BUG_ON(xa, !xa_empty(xa));
}

-static noinline void check_align(struct xarray *xa)
+static noinline void check_align(struct kunit *test)
{
	char name[] = "Motorola 68000";

-	check_align_1(xa, name);
-	check_align_1(xa, name + 1);
-	check_align_1(xa, name + 2);
-	check_align_1(xa, name + 3);
-	check_align_2(xa, name);
+	check_align_1(test, name);
+	check_align_1(test, name + 1);
+	check_align_1(test, name + 2);
+	check_align_1(test, name + 3);
+	check_align_2(test, name);
}

static LIST_HEAD(shadow_nodes);
@@ -1905,7 +2029,7 @@ static void test_update_node(struct xa_node *node)
	}
}

-static noinline void shadow_remove(struct xarray *xa)
+static noinline void shadow_remove(struct kunit *test, struct xarray *xa)
{
	struct xa_node *node;
@@ -1919,8 +2043,17 @@ static noinline void shadow_remove(struct xarray *xa)
	xa_unlock(xa);
}

-static noinline void check_workingset(struct xarray *xa, unsigned long index)
+struct workingset_testcase {
+	struct xarray *xa;
+	unsigned long index;
+};
+
+static noinline void check_workingset(struct kunit *test)
{
+	struct workingset_testcase tc = *(struct workingset_testcase *)test->param_value;
+	struct xarray *xa = tc.xa;
+	unsigned long index = tc.index;
+
	XA_STATE(xas, xa, index);

	xas_set_update(&xas, test_update_node);
@@ -1943,7 +2076,7 @@ static noinline void check_workingset(struct xarray *xa, unsigned long index)
	xas_unlock(&xas);

	XA_BUG_ON(xa, list_empty(&shadow_nodes));
-	shadow_remove(xa);
+	shadow_remove(test, xa);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));
	XA_BUG_ON(xa, !xa_empty(xa));
}
@@ -1952,9 +2085,11 @@
 * Check that the pointer / value / sibling entries are accounted the
 * way we expect them to be.
 */
-static noinline void check_account(struct xarray *xa)
+static noinline void check_account(struct kunit *test)
{
#ifdef CONFIG_XARRAY_MULTI
+	struct xarray *xa = xa_param(test);
+
	unsigned int order;

	for (order = 1; order < 12; order++) {
@@ -1981,8 +2116,10 @@ static noinline void check_account(struct xarray *xa)
#endif
}

-static noinline void check_get_order(struct xarray *xa)
+static noinline void check_get_order(struct kunit *test)
{
+	struct xarray *xa = xa_param(test);
+
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
	unsigned int order;
	unsigned long i, j;
@@ -2001,8 +2138,10 @@ static noinline void check_get_order(struct xarray *xa)
	}
}

-static noinline void check_xas_get_order(struct xarray *xa)
+static noinline void check_xas_get_order(struct kunit *test)
{
+	struct xarray *xa = xa_param(test);
+
	XA_STATE(xas, xa, 0);

	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
@@ -2034,8 +2173,10 @@ static noinline void check_xas_get_order(struct xarray *xa)
	}
}

-static noinline void check_xas_conflict_get_order(struct xarray *xa)
+static noinline void check_xas_conflict_get_order(struct kunit *test)
{
+	struct xarray *xa = xa_param(test);
+
	XA_STATE(xas, xa, 0);

	void *entry;
@@ -2092,8 +2233,10 @@ static noinline void check_xas_conflict_get_order(struct xarray *xa)
}

-static noinline void check_destroy(struct xarray *xa)
+static noinline void check_destroy(struct kunit *test)
{
+	struct xarray *xa = xa_param(test);
+
	unsigned long index;

	XA_BUG_ON(xa, !xa_empty(xa));
@@ -2126,52 +2269,59 @@ static noinline void check_destroy(struct xarray *xa)
}

static DEFINE_XARRAY(array);
+static struct xarray *arrays[] = { &array };
+KUNIT_ARRAY_PARAM(array, arrays, NULL);
+
+static struct xarray *xa0s[] = { &xa0 };
+KUNIT_ARRAY_PARAM(xa0, xa0s, NULL);
+
+static struct workingset_testcase workingset_testcases[] = {
+	{ &array, 0 },
+	{ &array, 64 },
+	{ &array, 4096 },
+};
+KUNIT_ARRAY_PARAM(workingset, workingset_testcases, NULL);
+
+static struct kunit_case xarray_cases[] = {
+	KUNIT_CASE_PARAM(check_xa_err, array_gen_params),
+	KUNIT_CASE_PARAM(check_xas_retry, array_gen_params),
+	KUNIT_CASE_PARAM(check_xa_load, array_gen_params),
+	KUNIT_CASE_PARAM(check_xa_mark, array_gen_params),
+	KUNIT_CASE_PARAM(check_xa_shrink, array_gen_params),
+	KUNIT_CASE_PARAM(check_xas_erase, array_gen_params),
+	KUNIT_CASE_PARAM(check_insert, array_gen_params),
+	KUNIT_CASE_PARAM(check_cmpxchg, array_gen_params),
+	KUNIT_CASE_PARAM(check_cmpxchg_order, array_gen_params),
+	KUNIT_CASE_PARAM(check_reserve, array_gen_params),
+	KUNIT_CASE_PARAM(check_reserve, xa0_gen_params),
+	KUNIT_CASE_PARAM(check_multi_store, array_gen_params),
+	KUNIT_CASE_PARAM(check_multi_store_advanced, array_gen_params),
+	KUNIT_CASE_PARAM(check_get_order, array_gen_params),
+	KUNIT_CASE_PARAM(check_xas_get_order, array_gen_params),
+	KUNIT_CASE_PARAM(check_xas_conflict_get_order, array_gen_params),
+	KUNIT_CASE(check_xa_alloc),
+	KUNIT_CASE_PARAM(check_find, array_gen_params),
+	KUNIT_CASE_PARAM(check_find_entry, array_gen_params),
+	KUNIT_CASE_PARAM(check_pause, array_gen_params),
+	KUNIT_CASE_PARAM(check_account, array_gen_params),
+	KUNIT_CASE_PARAM(check_destroy, array_gen_params),
+	KUNIT_CASE_PARAM(check_move, array_gen_params),
+	KUNIT_CASE_PARAM(check_create_range, array_gen_params),
+	KUNIT_CASE_PARAM(check_store_range, array_gen_params),
+	KUNIT_CASE_PARAM(check_store_iter, array_gen_params),
+	KUNIT_CASE_PARAM(check_align, xa0_gen_params),
+	KUNIT_CASE_PARAM(check_split, array_gen_params),
+	KUNIT_CASE_PARAM(check_workingset, workingset_gen_params),
+	{},
+};
+
+static struct kunit_suite xarray_suite = {
+	.name = "xarray",
+	.test_cases = xarray_cases,
+};
+
+kunit_test_suite(xarray_suite);

-static int xarray_checks(void)
-{
-	check_xa_err(&array);
-	check_xas_retry(&array);
-	check_xa_load(&array);
-	check_xa_mark(&array);
-	check_xa_shrink(&array);
-	check_xas_erase(&array);
-	check_insert(&array);
-	check_cmpxchg(&array);
-	check_cmpxchg_order(&array);
-	check_reserve(&array);
-	check_reserve(&xa0);
-	check_multi_store(&array);
-	check_multi_store_advanced(&array);
-	check_get_order(&array);
-	check_xas_get_order(&array);
-	check_xas_conflict_get_order(&array);
-	check_xa_alloc();
-	check_find(&array);
-	check_find_entry(&array);
-	check_pause(&array);
-	check_account(&array);
-	check_destroy(&array);
-	check_move(&array);
-	check_create_range(&array);
-	check_store_range(&array);
-	check_store_iter(&array);
-	check_align(&xa0);
-	check_split(&array);
-
-	check_workingset(&array, 0);
-	check_workingset(&array, 64);
-	check_workingset(&array, 4096);
-
-	printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
-	return (tests_run == tests_passed) ? 0 : -EINVAL;
-}
-
-static void xarray_exit(void)
-{
-}
-
-module_init(xarray_checks);
-module_exit(xarray_exit);
MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
MODULE_DESCRIPTION("XArray API test module");
MODULE_LICENSE("GPL");
diff --git a/lib/xarray.c b/lib/xarray.c
index 32d4bac8c94c..116e9286c64e 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -125,19 +125,20 @@ static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
 */
static void xas_squash_marks(const struct xa_state *xas)
{
-	unsigned int mark = 0;
+	xa_mark_t mark = 0;
	unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;

-	if (!xas->xa_sibs)
-		return;
+	for (;;) {
+		unsigned long *marks = node_marks(xas->xa_node, mark);

-	do {
-		unsigned long *marks = xas->xa_node->marks[mark];
-		if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
-			continue;
-		__set_bit(xas->xa_offset, marks);
-		bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
-	} while (mark++ != (__force unsigned)XA_MARK_MAX);
+		if (find_next_bit(marks, limit, xas->xa_offset + 1) != limit) {
+			__set_bit(xas->xa_offset, marks);
+			bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
+		}
+		if (mark == XA_MARK_MAX)
+			break;
+		mark_inc(mark);
+	}
}

/* extracts the offset within this node from the index */
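/*
 * The rewrite above replaces the old cast-based do/while with the mark_inc()
 * idiom so `mark` keeps its xa_mark_t type throughout. Extracted shape of
 * the loop for illustration only; node_marks() and mark_inc() are existing
 * lib/xarray.c internals, not defined here.
 */
static void demo_for_each_mark(struct xa_node *node)
{
	xa_mark_t mark = XA_MARK_0;

	for (;;) {
		/* ... operate on node_marks(node, mark) ... */
		if (mark == XA_MARK_MAX)
			break;
		mark_inc(mark);	/* advance without casting away the type */
	}
}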
@@ -435,6 +436,11 @@ static unsigned long max_index(void *entry)
	return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
}

+static inline void *xa_zero_to_null(void *entry)
+{
+	return xa_is_zero(entry) ? NULL : entry;
+}
+
static void xas_shrink(struct xa_state *xas)
{
	struct xarray *xa = xas->xa;
@@ -451,8 +457,8 @@ static void xas_shrink(struct xa_state *xas)
			break;
		if (!xa_is_node(entry) && node->shift)
			break;
-		if (xa_is_zero(entry) && xa_zero_busy(xa))
-			entry = NULL;
+		if (xa_zero_busy(xa))
+			entry = xa_zero_to_null(entry);
		xas->xa_node = XAS_BOUNDS;

		RCU_INIT_POINTER(xa->xa_head, entry);
@@ -1022,7 +1028,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
	unsigned int mask = xas->xa_sibs;

	/* XXX: no support for splitting really large entries yet */
-	if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
+	if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT <= order))
		goto nomem;
	if (xas->xa_shift + XA_CHUNK_SHIFT > order)
		return;
@@ -1147,6 +1153,7 @@ void xas_pause(struct xa_state *xas)
		if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
			break;
	}
+	xas->xa_index &= ~0UL << node->shift;
	xas->xa_index += (offset - xas->xa_offset) << node->shift;
	if (xas->xa_index == 0)
		xas->xa_node = XAS_BOUNDS;
@@ -1382,6 +1389,8 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
			continue;
+		if (xa_is_sibling(entry))
+			continue;
		if (!xa_is_node(entry))
			return entry;
		xas->xa_node = xa_to_node(entry);
@@ -1474,9 +1483,7 @@ void *xa_load(struct xarray *xa, unsigned long index)

	rcu_read_lock();
	do {
-		entry = xas_load(&xas);
-		if (xa_is_zero(entry))
-			entry = NULL;
+		entry = xa_zero_to_null(xas_load(&xas));
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

@@ -1486,8 +1493,6 @@ EXPORT_SYMBOL(xa_load);

static void *xas_result(struct xa_state *xas, void *curr)
{
-	if (xa_is_zero(curr))
-		return NULL;
	if (xas_error(xas))
		curr = xas->xa_node;
	return curr;
@@ -1508,7 +1513,7 @@ static void *xas_result(struct xa_state *xas, void *curr)
void *__xa_erase(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
-	return xas_result(&xas, xas_store(&xas, NULL));
+	return xas_result(&xas, xa_zero_to_null(xas_store(&xas, NULL)));
}
EXPORT_SYMBOL(__xa_erase);
@@ -1567,7 +1572,7 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
		xas_clear_mark(&xas, XA_FREE_MARK);
	} while (__xas_nomem(&xas, gfp));

-	return xas_result(&xas, curr);
+	return xas_result(&xas, xa_zero_to_null(curr));
}
EXPORT_SYMBOL(__xa_store);
@@ -1600,6 +1605,9 @@ void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
}
EXPORT_SYMBOL(xa_store);

+static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
+		void *old, void *entry, gfp_t gfp);
+
/**
 * __xa_cmpxchg() - Store this entry in the XArray.
 * @xa: XArray.
@@ -1619,6 +1627,13 @@ EXPORT_SYMBOL(xa_store);
void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
+	return xa_zero_to_null(__xa_cmpxchg_raw(xa, index, old, entry, gfp));
+}
+EXPORT_SYMBOL(__xa_cmpxchg);
+
+static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
+		void *old, void *entry, gfp_t gfp)
+{
	XA_STATE(xas, xa, index);
	void *curr;
@@ -1636,7 +1651,6 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
	return xas_result(&xas, curr);
}
-EXPORT_SYMBOL(__xa_cmpxchg);
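/*
 * Net effect of centralising xa_zero_to_null() (an illustration, not part of
 * the patch): a reserved slot holds XA_ZERO_ENTRY internally, and every
 * normal-API return path above now funnels through the helper, so callers
 * keep seeing NULL.
 */
static void demo_reserved_slot(struct xarray *xa)
{
	xa_reserve(xa, 12, GFP_KERNEL);		/* slot 12 holds XA_ZERO_ENTRY */
	WARN_ON(xa_load(xa, 12) != NULL);	/* reported to callers as NULL */
	WARN_ON(xa_erase(xa, 12) != NULL);	/* erase also reports NULL */
}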

/**
 * __xa_insert() - Store this entry in the XArray if no entry is present.
@@ -1656,26 +1670,16 @@ EXPORT_SYMBOL(__xa_cmpxchg);
 */
int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
-	XA_STATE(xas, xa, index);
	void *curr;
+	int errno;

-	if (WARN_ON_ONCE(xa_is_advanced(entry)))
-		return -EINVAL;
	if (!entry)
		entry = XA_ZERO_ENTRY;
-
-	do {
-		curr = xas_load(&xas);
-		if (!curr) {
-			xas_store(&xas, entry);
-			if (xa_track_free(xa))
-				xas_clear_mark(&xas, XA_FREE_MARK);
-		} else {
-			xas_set_err(&xas, -EBUSY);
-		}
-	} while (__xas_nomem(&xas, gfp));
-
-	return xas_error(&xas);
+	curr = __xa_cmpxchg_raw(xa, index, NULL, entry, gfp);
+	errno = xa_err(curr);
+	if (errno)
+		return errno;
+	return (curr != NULL) ? -EBUSY : 0;
}
EXPORT_SYMBOL(__xa_insert);
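/*
 * Caller-visible contract of the reworked __xa_insert() (a sketch, not part
 * of the patch): insertion is now a cmpxchg of NULL -> entry on the raw
 * slot, so any occupied slot, including one that is merely reserved
 * (XA_ZERO_ENTRY), fails with -EBUSY; inserting NULL reserves the slot.
 */
static int demo_insert(struct xarray *xa, unsigned long index, void *p)
{
	int err = xa_insert(xa, index, p, GFP_KERNEL);	/* p == NULL reserves */

	return err;	/* 0 on success, -EBUSY if occupied, -ENOMEM on failure */
}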