Diffstat (limited to 'lib/test_maple_tree.c')
-rw-r--r--  lib/test_maple_tree.c | 227
1 file changed, 89 insertions, 138 deletions
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 704cb1093ae8..a182e48b5f5e 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -1563,6 +1563,30 @@ static noinline void __init check_root_expand(struct maple_tree *mt)
mas_unlock(&mas);
}
+static noinline void __init check_deficient_node(struct maple_tree *mt)
+{
+ MA_STATE(mas, mt, 0, 0);
+ int count;
+
+ mas_lock(&mas);
+ for (count = 0; count < 10; count++) {
+ mas_set(&mas, count);
+ mas_store_gfp(&mas, xa_mk_value(count), GFP_KERNEL);
+ }
+
+ for (count = 20; count < 39; count++) {
+ mas_set(&mas, count);
+ mas_store_gfp(&mas, xa_mk_value(count), GFP_KERNEL);
+ }
+
+ for (count = 10; count < 12; count++) {
+ mas_set(&mas, count);
+ mas_store_gfp(&mas, xa_mk_value(count), GFP_KERNEL);
+ }
+ mas_unlock(&mas);
+ mt_validate(mt);
+}
+
static noinline void __init check_gap_combining(struct maple_tree *mt)
{
struct maple_enode *mn1, *mn2;
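The new check_deficient_node() above fills two separated clusters (indices 0-9 and 20-38) and then stores into the gap at 10-11; going by the function name, the point is to drive a rebalance that leaves a node short of its minimum occupancy, after which mt_validate() walks the whole tree and checks its invariants. A minimal sketch of the same store-then-validate pattern, with a hypothetical helper name but only calls already used in this file:

static void __init store_run_and_validate(struct maple_tree *mt,
					  unsigned long first,
					  unsigned long last)
{
	MA_STATE(mas, mt, 0, 0);
	unsigned long i;

	mas_lock(&mas);
	for (i = first; i <= last; i++) {
		/* Position at index i, then store a tagged value there */
		mas_set(&mas, i);
		mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL);
	}
	mas_unlock(&mas);
	mt_validate(mt);	/* full-tree invariant check */
}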
@@ -2722,139 +2746,6 @@ static noinline void __init check_fuzzer(struct maple_tree *mt)
mtree_test_erase(mt, ULONG_MAX - 10);
}
-/* duplicate the tree with a specific gap */
-static noinline void __init check_dup_gaps(struct maple_tree *mt,
- unsigned long nr_entries, bool zero_start,
- unsigned long gap)
-{
- unsigned long i = 0;
- struct maple_tree newmt;
- int ret;
- void *tmp;
- MA_STATE(mas, mt, 0, 0);
- MA_STATE(newmas, &newmt, 0, 0);
- struct rw_semaphore newmt_lock;
-
- init_rwsem(&newmt_lock);
- mt_set_external_lock(&newmt, &newmt_lock);
-
- if (!zero_start)
- i = 1;
-
- mt_zero_nr_tallocated();
- for (; i <= nr_entries; i++)
- mtree_store_range(mt, i*10, (i+1)*10 - gap,
- xa_mk_value(i), GFP_KERNEL);
-
- mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
- mt_set_non_kernel(99999);
- down_write(&newmt_lock);
- ret = mas_expected_entries(&newmas, nr_entries);
- mt_set_non_kernel(0);
- MT_BUG_ON(mt, ret != 0);
-
- rcu_read_lock();
- mas_for_each(&mas, tmp, ULONG_MAX) {
- newmas.index = mas.index;
- newmas.last = mas.last;
- mas_store(&newmas, tmp);
- }
- rcu_read_unlock();
- mas_destroy(&newmas);
-
- __mt_destroy(&newmt);
- up_write(&newmt_lock);
-}
-
-/* Duplicate many sizes of trees. Mainly to test expected entry values */
-static noinline void __init check_dup(struct maple_tree *mt)
-{
- int i;
- int big_start = 100010;
-
- /* Check with a value at zero */
- for (i = 10; i < 1000; i++) {
- mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
- check_dup_gaps(mt, i, true, 5);
- mtree_destroy(mt);
- rcu_barrier();
- }
-
- cond_resched();
- mt_cache_shrink();
- /* Check with a value at zero, no gap */
- for (i = 1000; i < 2000; i++) {
- mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
- check_dup_gaps(mt, i, true, 0);
- mtree_destroy(mt);
- rcu_barrier();
- }
-
- cond_resched();
- mt_cache_shrink();
- /* Check with a value at zero and unreasonably large */
- for (i = big_start; i < big_start + 10; i++) {
- mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
- check_dup_gaps(mt, i, true, 5);
- mtree_destroy(mt);
- rcu_barrier();
- }
-
- cond_resched();
- mt_cache_shrink();
- /* Small to medium size not starting at zero*/
- for (i = 200; i < 1000; i++) {
- mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
- check_dup_gaps(mt, i, false, 5);
- mtree_destroy(mt);
- rcu_barrier();
- }
-
- cond_resched();
- mt_cache_shrink();
- /* Unreasonably large not starting at zero*/
- for (i = big_start; i < big_start + 10; i++) {
- mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
- check_dup_gaps(mt, i, false, 5);
- mtree_destroy(mt);
- rcu_barrier();
- cond_resched();
- mt_cache_shrink();
- }
-
- /* Check non-allocation tree not starting at zero */
- for (i = 1500; i < 3000; i++) {
- mt_init_flags(mt, 0);
- check_dup_gaps(mt, i, false, 5);
- mtree_destroy(mt);
- rcu_barrier();
- cond_resched();
- if (i % 2 == 0)
- mt_cache_shrink();
- }
-
- mt_cache_shrink();
- /* Check non-allocation tree starting at zero */
- for (i = 200; i < 1000; i++) {
- mt_init_flags(mt, 0);
- check_dup_gaps(mt, i, true, 5);
- mtree_destroy(mt);
- rcu_barrier();
- cond_resched();
- }
-
- mt_cache_shrink();
- /* Unreasonably large */
- for (i = big_start + 5; i < big_start + 10; i++) {
- mt_init_flags(mt, 0);
- check_dup_gaps(mt, i, true, 5);
- mtree_destroy(mt);
- rcu_barrier();
- mt_cache_shrink();
- cond_resched();
- }
-}
-
static noinline void __init check_bnode_min_spanning(struct maple_tree *mt)
{
int i = 50;
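The removed check_dup_gaps() copied a tree using the standard read-side iteration idiom: mas_for_each() yields each populated range up to a limit, while mas_expected_entries() preallocated enough nodes beforehand that the mas_store() calls in the loop could not fail, and mas_destroy() released any leftover preallocation. A standalone sketch of just the iteration idiom, with a hypothetical function name:

/* Hypothetical sketch: count populated ranges with mas_for_each(). */
static unsigned long __init count_entries(struct maple_tree *mt)
{
	MA_STATE(mas, mt, 0, 0);
	unsigned long n = 0;
	void *entry;

	rcu_read_lock();
	mas_for_each(&mas, entry, ULONG_MAX)
		n++;	/* one iteration per populated range */
	rcu_read_unlock();

	return n;
}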
@@ -3153,6 +3044,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
void *entry, *ptr = (void *) 0x1234500;
void *ptr2 = &ptr;
void *ptr3 = &ptr2;
+ unsigned long index;
/* Check MAS_ROOT First */
mtree_store_range(mt, 0, 0, ptr, GFP_KERNEL);
@@ -3537,7 +3429,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, mas.last != 0x1500);
MT_BUG_ON(mt, !mas_is_active(&mas));
- /* find: start ->active on value */;
+ /* find: start ->active on value */
mas_set(&mas, 1200);
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr);
@@ -3683,6 +3575,37 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, !mas_is_active(&mas));
mas_unlock(&mas);
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ mas_lock(&mas);
+ for (int count = 0; count < 30; count++) {
+ mas_set(&mas, count);
+ mas_store_gfp(&mas, xa_mk_value(count), GFP_KERNEL);
+ }
+
+ /* Ensure mas_find works with MA_UNDERFLOW */
+ mas_set(&mas, 0);
+ entry = mas_walk(&mas);
+ mas_set(&mas, 0);
+ mas_prev(&mas, 0);
+ MT_BUG_ON(mt, mas.status != ma_underflow);
+ MT_BUG_ON(mt, mas_find(&mas, ULONG_MAX) != entry);
+
+ /* Restore active on mas_next */
+ entry = mas_next(&mas, ULONG_MAX);
+ index = mas.index;
+ mas_prev(&mas, index);
+ MT_BUG_ON(mt, mas.status != ma_underflow);
+ MT_BUG_ON(mt, mas_next(&mas, ULONG_MAX) != entry);
+
+ /* Ensure overflow -> active works */
+ mas_prev(&mas, 0);
+ mas_next(&mas, index - 1);
+ MT_BUG_ON(mt, mas.status != ma_overflow);
+ MT_BUG_ON(mt, mas_next(&mas, ULONG_MAX) != entry);
+
+ mas_unlock(&mas);
}
static noinline void __init alloc_cyclic_testing(struct maple_tree *mt)
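The assertions added to check_state_handling() above pin down the state-machine contract: stepping below the allowed minimum leaves the mas in ma_underflow (and stepping past the maximum in ma_overflow), and the next forward operation must resume from the saved position rather than return a stale result. Condensed, the underflow round-trip being asserted looks like this (illustrative sketch, assuming index 0 holds an entry):

	MA_STATE(mas, mt, 0, 0);
	void *entry;

	mas_lock(&mas);
	mas_set(&mas, 0);
	entry = mas_walk(&mas);		/* load the entry at index 0 */
	mas_set(&mas, 0);
	mas_prev(&mas, 0);		/* nothing below 0 -> ma_underflow */
	MT_BUG_ON(mt, mas.status != ma_underflow);
	MT_BUG_ON(mt, mas_find(&mas, ULONG_MAX) != entry);	/* recovered */
	mas_unlock(&mas);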
@@ -3714,6 +3637,34 @@ static noinline void __init alloc_cyclic_testing(struct maple_tree *mt)
}
mtree_destroy(mt);
+
+ /*
+ * An issue with the reverse search was discovered at
+ * https://lore.kernel.org/all/20241216060600.287B4C4CED0@smtp.kernel.org/
+ * Exhausting the allocation area and forcing the search to wrap requires
+ * a mas_reset() in mas_alloc_cyclic().
+ */
+ next = 0;
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (int i = 0; i < 1023; i++) {
+ mtree_alloc_cyclic(mt, &location, mt, 2, 1024, &next, GFP_KERNEL);
+ MT_BUG_ON(mt, i != location - 2);
+ MT_BUG_ON(mt, i != next - 3);
+ MT_BUG_ON(mt, mtree_load(mt, location) != mt);
+ }
+ mtree_erase(mt, 123);
+ MT_BUG_ON(mt, mtree_load(mt, 123) != NULL);
+ mtree_alloc_cyclic(mt, &location, mt, 2, 1024, &next, GFP_KERNEL);
+ MT_BUG_ON(mt, 123 != location);
+ MT_BUG_ON(mt, 124 != next);
+ MT_BUG_ON(mt, mtree_load(mt, location) != mt);
+ mtree_erase(mt, 100);
+ mtree_alloc_cyclic(mt, &location, mt, 2, 1024, &next, GFP_KERNEL);
+ MT_BUG_ON(mt, 100 != location);
+ MT_BUG_ON(mt, 101 != next);
+ MT_BUG_ON(mt, mtree_load(mt, location) != mt);
+ mtree_destroy(mt);
+
/* Overflow test */
next = ULONG_MAX - 1;
ret = mtree_alloc_cyclic(mt, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL);
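For background on the hunk above: mtree_alloc_cyclic() allocates the lowest free index at or above the *next hint within the requested range, wraps back to the range minimum once the hint passes the top, and writes the following hint back through *next; the loop above exhausts indices 2-1024 precisely so the post-wrap search path gets exercised. A minimal usage sketch with a hypothetical caller (my reading of the return convention: 0 on success, 1 on success after wrapping, negative errno on failure):

static int __init demo_alloc_id(struct maple_tree *mt, void *item,
				unsigned long *next)
{
	unsigned long id;
	int ret;

	ret = mtree_alloc_cyclic(mt, &id, item, 2, 1024, next, GFP_KERNEL);
	if (ret < 0)
		return ret;	/* e.g. -EBUSY when the range is full */

	pr_info("allocated id %lu, next hint %lu\n", id, *next);
	return 0;
}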
@@ -3797,6 +3748,10 @@ static int __init maple_tree_seed(void)
#endif
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_deficient_node(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_store_null(&tree);
mtree_destroy(&tree);
@@ -3990,10 +3945,6 @@ static int __init maple_tree_seed(void)
mtree_destroy(&tree);
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
- check_dup(&tree);
- mtree_destroy(&tree);
-
- mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_bnode_min_spanning(&tree);
mtree_destroy(&tree);