Diffstat (limited to 'tools/testing/radix-tree/maple.c')
-rw-r--r--	tools/testing/radix-tree/maple.c | 1407
1 file changed, 902 insertions(+), 505 deletions(-)
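
The first hunk below rewires how the userspace test links against the kernel sources: the per-file CONFIG defines and the direct include of lib/maple_tree.c go away, leaving only the module-macro stubs plus a shared shim. Assembled from the hunk's added lines, the post-patch header reads (a sketch of the resulting state, not an addition to the patch):

	#define module_init(x)
	#define module_exit(x)
	#define MODULE_AUTHOR(x)
	#define MODULE_DESCRIPTION(X)
	#define MODULE_LICENSE(x)
	#define dump_stack()	assert(0)	/* any stack dump is a test failure */

	#include "test.h"

	#include "../shared/maple-shim.c"
	#include "../../../lib/test_maple_tree.c"

Keeping the stubs local while routing the implementation through ../shared/maple-shim.c presumably lets other userspace tests share one copy of the include logic.
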
diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c index 81fa7ec2e66a..5c1b18e3ed21 100644 --- a/tools/testing/radix-tree/maple.c +++ b/tools/testing/radix-tree/maple.c @@ -8,21 +8,16 @@ * difficult to handle in kernel tests. */ -#define CONFIG_DEBUG_MAPLE_TREE -#define CONFIG_MAPLE_SEARCH -#define MAPLE_32BIT (MAPLE_NODE_SLOTS > 31) -#include "test.h" -#include <stdlib.h> -#include <time.h> - #define module_init(x) #define module_exit(x) #define MODULE_AUTHOR(x) +#define MODULE_DESCRIPTION(X) #define MODULE_LICENSE(x) #define dump_stack() assert(0) -#include "../../../lib/maple_tree.c" -#undef CONFIG_DEBUG_MAPLE_TREE +#include "test.h" + +#include "../shared/maple-shim.c" #include "../../../lib/test_maple_tree.c" #define RCU_RANGE_COUNT 1000 @@ -45,6 +40,13 @@ struct rcu_test_struct2 { unsigned long last[RCU_RANGE_COUNT]; }; +struct rcu_test_struct3 { + struct maple_tree *mt; + unsigned long index; + unsigned long last; + bool stop; +}; + struct rcu_reader_struct { unsigned int id; int mod; @@ -56,369 +58,9 @@ struct rcu_reader_struct { }; /* - * check_new_node() - Check the creation of new nodes and error path - * verification. - */ -static noinline void check_new_node(struct maple_tree *mt) -{ - - struct maple_node *mn, *mn2, *mn3; - struct maple_alloc *smn; - struct maple_node *nodes[100]; - int i, j, total; - - MA_STATE(mas, mt, 0, 0); - - /* Try allocating 3 nodes */ - mtree_lock(mt); - mt_set_non_kernel(0); - /* request 3 nodes to be allocated. */ - mas_node_count(&mas, 3); - /* Allocation request of 3. */ - MT_BUG_ON(mt, mas_alloc_req(&mas) != 3); - /* Allocate failed. */ - MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM)); - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); - - MT_BUG_ON(mt, mas_allocated(&mas) != 3); - mn = mas_pop_node(&mas); - MT_BUG_ON(mt, not_empty(mn)); - MT_BUG_ON(mt, mn == NULL); - MT_BUG_ON(mt, mas.alloc == NULL); - MT_BUG_ON(mt, mas.alloc->slot[0] == NULL); - mas_push_node(&mas, mn); - mas_nomem(&mas, GFP_KERNEL); /* free */ - mtree_unlock(mt); - - - /* Try allocating 1 node, then 2 more */ - mtree_lock(mt); - /* Set allocation request to 1. */ - mas_set_alloc_req(&mas, 1); - /* Check Allocation request of 1. */ - MT_BUG_ON(mt, mas_alloc_req(&mas) != 1); - mas_set_err(&mas, -ENOMEM); - /* Validate allocation request. */ - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); - /* Eat the requested node. */ - mn = mas_pop_node(&mas); - MT_BUG_ON(mt, not_empty(mn)); - MT_BUG_ON(mt, mn == NULL); - MT_BUG_ON(mt, mn->slot[0] != NULL); - MT_BUG_ON(mt, mn->slot[1] != NULL); - MT_BUG_ON(mt, mas_allocated(&mas) != 0); - - ma_free_rcu(mn); - mas.node = MAS_START; - mas_nomem(&mas, GFP_KERNEL); - /* Allocate 3 nodes, will fail. */ - mas_node_count(&mas, 3); - /* Drop the lock and allocate 3 nodes. */ - mas_nomem(&mas, GFP_KERNEL); - /* Ensure 3 are allocated. */ - MT_BUG_ON(mt, mas_allocated(&mas) != 3); - /* Allocation request of 0. */ - MT_BUG_ON(mt, mas_alloc_req(&mas) != 0); - - MT_BUG_ON(mt, mas.alloc == NULL); - MT_BUG_ON(mt, mas.alloc->slot[0] == NULL); - MT_BUG_ON(mt, mas.alloc->slot[1] == NULL); - /* Ensure we counted 3. */ - MT_BUG_ON(mt, mas_allocated(&mas) != 3); - /* Free. */ - mas_nomem(&mas, GFP_KERNEL); - - /* Set allocation request to 1. */ - mas_set_alloc_req(&mas, 1); - MT_BUG_ON(mt, mas_alloc_req(&mas) != 1); - mas_set_err(&mas, -ENOMEM); - /* Validate allocation request. */ - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); - MT_BUG_ON(mt, mas_allocated(&mas) != 1); - /* Check the node is only one node. 
*/ - mn = mas_pop_node(&mas); - MT_BUG_ON(mt, not_empty(mn)); - MT_BUG_ON(mt, mas_allocated(&mas) != 0); - MT_BUG_ON(mt, mn == NULL); - MT_BUG_ON(mt, mn->slot[0] != NULL); - MT_BUG_ON(mt, mn->slot[1] != NULL); - MT_BUG_ON(mt, mas_allocated(&mas) != 0); - mas_push_node(&mas, mn); - MT_BUG_ON(mt, mas_allocated(&mas) != 1); - MT_BUG_ON(mt, mas.alloc->node_count); - - mas_set_alloc_req(&mas, 2); /* request 2 more. */ - MT_BUG_ON(mt, mas_alloc_req(&mas) != 2); - mas_set_err(&mas, -ENOMEM); - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); - MT_BUG_ON(mt, mas_allocated(&mas) != 3); - MT_BUG_ON(mt, mas.alloc == NULL); - MT_BUG_ON(mt, mas.alloc->slot[0] == NULL); - MT_BUG_ON(mt, mas.alloc->slot[1] == NULL); - for (i = 2; i >= 0; i--) { - mn = mas_pop_node(&mas); - MT_BUG_ON(mt, mas_allocated(&mas) != i); - MT_BUG_ON(mt, !mn); - MT_BUG_ON(mt, not_empty(mn)); - ma_free_rcu(mn); - } - - total = 64; - mas_set_alloc_req(&mas, total); /* request 2 more. */ - MT_BUG_ON(mt, mas_alloc_req(&mas) != total); - mas_set_err(&mas, -ENOMEM); - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); - for (i = total; i > 0; i--) { - unsigned int e = 0; /* expected node_count */ - - if (!MAPLE_32BIT) { - if (i >= 35) - e = i - 35; - else if (i >= 5) - e = i - 5; - else if (i >= 2) - e = i - 2; - } else { - if (i >= 4) - e = i - 4; - else if (i == 3) - e = i - 2; - else - e = 0; - } - - MT_BUG_ON(mt, mas.alloc->node_count != e); - mn = mas_pop_node(&mas); - MT_BUG_ON(mt, not_empty(mn)); - MT_BUG_ON(mt, mas_allocated(&mas) != i - 1); - MT_BUG_ON(mt, !mn); - ma_free_rcu(mn); - } - - total = 100; - for (i = 1; i < total; i++) { - mas_set_alloc_req(&mas, i); - mas_set_err(&mas, -ENOMEM); - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); - for (j = i; j > 0; j--) { - mn = mas_pop_node(&mas); - MT_BUG_ON(mt, mas_allocated(&mas) != j - 1); - MT_BUG_ON(mt, !mn); - MT_BUG_ON(mt, not_empty(mn)); - mas_push_node(&mas, mn); - MT_BUG_ON(mt, mas_allocated(&mas) != j); - mn = mas_pop_node(&mas); - MT_BUG_ON(mt, not_empty(mn)); - MT_BUG_ON(mt, mas_allocated(&mas) != j - 1); - ma_free_rcu(mn); - } - MT_BUG_ON(mt, mas_allocated(&mas) != 0); - - mas_set_alloc_req(&mas, i); - mas_set_err(&mas, -ENOMEM); - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); - for (j = 0; j <= i/2; j++) { - MT_BUG_ON(mt, mas_allocated(&mas) != i - j); - nodes[j] = mas_pop_node(&mas); - MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1); - } - - while (j) { - j--; - mas_push_node(&mas, nodes[j]); - MT_BUG_ON(mt, mas_allocated(&mas) != i - j); - } - MT_BUG_ON(mt, mas_allocated(&mas) != i); - for (j = 0; j <= i/2; j++) { - MT_BUG_ON(mt, mas_allocated(&mas) != i - j); - mn = mas_pop_node(&mas); - MT_BUG_ON(mt, not_empty(mn)); - ma_free_rcu(mn); - MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1); - } - MT_BUG_ON(mt, mas_nomem(&mas, GFP_KERNEL)); - - } - - /* Set allocation request. */ - total = 500; - mas_node_count(&mas, total); - /* Drop the lock and allocate the nodes. */ - mas_nomem(&mas, GFP_KERNEL); - MT_BUG_ON(mt, !mas.alloc); - i = 1; - smn = mas.alloc; - while (i < total) { - for (j = 0; j < MAPLE_ALLOC_SLOTS; j++) { - i++; - MT_BUG_ON(mt, !smn->slot[j]); - if (i == total) - break; - } - smn = smn->slot[0]; /* next. */ - } - MT_BUG_ON(mt, mas_allocated(&mas) != total); - mas_nomem(&mas, GFP_KERNEL); /* Free. 
*/ - - MT_BUG_ON(mt, mas_allocated(&mas) != 0); - for (i = 1; i < 128; i++) { - mas_node_count(&mas, i); /* Request */ - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ - MT_BUG_ON(mt, mas_allocated(&mas) != i); /* check request filled */ - for (j = i; j > 0; j--) { /*Free the requests */ - mn = mas_pop_node(&mas); /* get the next node. */ - MT_BUG_ON(mt, mn == NULL); - MT_BUG_ON(mt, not_empty(mn)); - ma_free_rcu(mn); - } - MT_BUG_ON(mt, mas_allocated(&mas) != 0); - } - - for (i = 1; i < MAPLE_NODE_MASK + 1; i++) { - MA_STATE(mas2, mt, 0, 0); - mas_node_count(&mas, i); /* Request */ - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ - MT_BUG_ON(mt, mas_allocated(&mas) != i); /* check request filled */ - for (j = 1; j <= i; j++) { /* Move the allocations to mas2 */ - mn = mas_pop_node(&mas); /* get the next node. */ - MT_BUG_ON(mt, mn == NULL); - MT_BUG_ON(mt, not_empty(mn)); - mas_push_node(&mas2, mn); - MT_BUG_ON(mt, mas_allocated(&mas2) != j); - } - MT_BUG_ON(mt, mas_allocated(&mas) != 0); - MT_BUG_ON(mt, mas_allocated(&mas2) != i); - - for (j = i; j > 0; j--) { /*Free the requests */ - MT_BUG_ON(mt, mas_allocated(&mas2) != j); - mn = mas_pop_node(&mas2); /* get the next node. */ - MT_BUG_ON(mt, mn == NULL); - MT_BUG_ON(mt, not_empty(mn)); - ma_free_rcu(mn); - } - MT_BUG_ON(mt, mas_allocated(&mas2) != 0); - } - - - MT_BUG_ON(mt, mas_allocated(&mas) != 0); - mas_node_count(&mas, MAPLE_ALLOC_SLOTS + 1); /* Request */ - MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM)); - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); - MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1); - MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 1); - - mn = mas_pop_node(&mas); /* get the next node. */ - MT_BUG_ON(mt, mn == NULL); - MT_BUG_ON(mt, not_empty(mn)); - MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS); - MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 2); - - mas_push_node(&mas, mn); - MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1); - MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 1); - - /* Check the limit of pop/push/pop */ - mas_node_count(&mas, MAPLE_ALLOC_SLOTS + 2); /* Request */ - MT_BUG_ON(mt, mas_alloc_req(&mas) != 1); - MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM)); - MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL)); - MT_BUG_ON(mt, mas_alloc_req(&mas)); - MT_BUG_ON(mt, mas.alloc->node_count); - MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2); - mn = mas_pop_node(&mas); - MT_BUG_ON(mt, not_empty(mn)); - MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1); - MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 1); - mas_push_node(&mas, mn); - MT_BUG_ON(mt, mas.alloc->node_count); - MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2); - mn = mas_pop_node(&mas); - MT_BUG_ON(mt, not_empty(mn)); - ma_free_rcu(mn); - for (i = 1; i <= MAPLE_ALLOC_SLOTS + 1; i++) { - mn = mas_pop_node(&mas); - MT_BUG_ON(mt, not_empty(mn)); - ma_free_rcu(mn); - } - MT_BUG_ON(mt, mas_allocated(&mas) != 0); - - - for (i = 3; i < MAPLE_NODE_MASK * 3; i++) { - mas.node = MA_ERROR(-ENOMEM); - mas_node_count(&mas, i); /* Request */ - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ - mn = mas_pop_node(&mas); /* get the next node. */ - mas_push_node(&mas, mn); /* put it back */ - mas_destroy(&mas); - - mas.node = MA_ERROR(-ENOMEM); - mas_node_count(&mas, i); /* Request */ - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ - mn = mas_pop_node(&mas); /* get the next node. */ - mn2 = mas_pop_node(&mas); /* get the next node. 
*/ - mas_push_node(&mas, mn); /* put them back */ - mas_push_node(&mas, mn2); - mas_destroy(&mas); - - mas.node = MA_ERROR(-ENOMEM); - mas_node_count(&mas, i); /* Request */ - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ - mn = mas_pop_node(&mas); /* get the next node. */ - mn2 = mas_pop_node(&mas); /* get the next node. */ - mn3 = mas_pop_node(&mas); /* get the next node. */ - mas_push_node(&mas, mn); /* put them back */ - mas_push_node(&mas, mn2); - mas_push_node(&mas, mn3); - mas_destroy(&mas); - - mas.node = MA_ERROR(-ENOMEM); - mas_node_count(&mas, i); /* Request */ - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ - mn = mas_pop_node(&mas); /* get the next node. */ - ma_free_rcu(mn); - mas_destroy(&mas); - - mas.node = MA_ERROR(-ENOMEM); - mas_node_count(&mas, i); /* Request */ - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ - mn = mas_pop_node(&mas); /* get the next node. */ - ma_free_rcu(mn); - mn = mas_pop_node(&mas); /* get the next node. */ - ma_free_rcu(mn); - mn = mas_pop_node(&mas); /* get the next node. */ - ma_free_rcu(mn); - mas_destroy(&mas); - } - - mas.node = MA_ERROR(-ENOMEM); - mas_node_count(&mas, 5); /* Request */ - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ - MT_BUG_ON(mt, mas_allocated(&mas) != 5); - mas.node = MA_ERROR(-ENOMEM); - mas_node_count(&mas, 10); /* Request */ - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ - mas.node = MAS_START; - MT_BUG_ON(mt, mas_allocated(&mas) != 10); - mas_destroy(&mas); - - mas.node = MA_ERROR(-ENOMEM); - mas_node_count(&mas, MAPLE_ALLOC_SLOTS - 1); /* Request */ - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ - MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS - 1); - mas.node = MA_ERROR(-ENOMEM); - mas_node_count(&mas, 10 + MAPLE_ALLOC_SLOTS - 1); /* Request */ - mas_nomem(&mas, GFP_KERNEL); /* Fill request */ - mas.node = MAS_START; - MT_BUG_ON(mt, mas_allocated(&mas) != 10 + MAPLE_ALLOC_SLOTS - 1); - mas_destroy(&mas); - - mtree_unlock(mt); -} - -/* * Check erasing including RCU. 
*/ -static noinline void check_erase(struct maple_tree *mt, unsigned long index, +static noinline void __init check_erase(struct maple_tree *mt, unsigned long index, void *ptr) { MT_BUG_ON(mt, mtree_test_erase(mt, index) != ptr); @@ -428,24 +70,24 @@ static noinline void check_erase(struct maple_tree *mt, unsigned long index, #define erase_check_insert(mt, i) check_insert(mt, set[i], entry[i%2]) #define erase_check_erase(mt, i) check_erase(mt, set[i], entry[i%2]) -static noinline void check_erase_testset(struct maple_tree *mt) +static noinline void __init check_erase_testset(struct maple_tree *mt) { - unsigned long set[] = { 5015, 5014, 5017, 25, 1000, - 1001, 1002, 1003, 1005, 0, - 6003, 6002, 6008, 6012, 6015, - 7003, 7002, 7008, 7012, 7015, - 8003, 8002, 8008, 8012, 8015, - 9003, 9002, 9008, 9012, 9015, - 10003, 10002, 10008, 10012, 10015, - 11003, 11002, 11008, 11012, 11015, - 12003, 12002, 12008, 12012, 12015, - 13003, 13002, 13008, 13012, 13015, - 14003, 14002, 14008, 14012, 14015, - 15003, 15002, 15008, 15012, 15015, - }; - - - void *ptr = &set; + static const unsigned long set[] = { 5015, 5014, 5017, 25, 1000, + 1001, 1002, 1003, 1005, 0, + 6003, 6002, 6008, 6012, 6015, + 7003, 7002, 7008, 7012, 7015, + 8003, 8002, 8008, 8012, 8015, + 9003, 9002, 9008, 9012, 9015, + 10003, 10002, 10008, 10012, 10015, + 11003, 11002, 11008, 11012, 11015, + 12003, 12002, 12008, 12012, 12015, + 13003, 13002, 13008, 13012, 13015, + 14003, 14002, 14008, 14012, 14015, + 15003, 15002, 15008, 15012, 15015, + }; + + + void *ptr = &check_erase_testset; void *entry[2] = { ptr, mt }; void *root_node; @@ -702,7 +344,7 @@ static noinline void check_erase_testset(struct maple_tree *mt) int mas_ce2_over_count(struct ma_state *mas_start, struct ma_state *mas_end, void *s_entry, unsigned long s_min, void *e_entry, unsigned long e_max, - unsigned long *set, int i, bool null_entry) + const unsigned long *set, int i, bool null_entry) { int count = 0, span = 0; unsigned long retry = 0; @@ -897,10 +539,11 @@ retry: ret = mas_descend_walk(mas, range_min, range_max); if (unlikely(mte_dead_node(mas->node))) { - mas->node = MAS_START; + mas->status = ma_start; goto retry; } + mas->end = mas_data_end(mas); return ret; not_found: @@ -916,24 +559,26 @@ static inline void *mas_range_load(struct ma_state *mas, unsigned long index = mas->index; if (mas_is_none(mas) || mas_is_paused(mas)) - mas->node = MAS_START; + mas->status = ma_start; retry: if (mas_tree_walk(mas, range_min, range_max)) - if (unlikely(mas->node == MAS_ROOT)) + if (unlikely(mas->status == ma_root)) return mas_root(mas); if (likely(mas->offset != MAPLE_NODE_SLOTS)) entry = mas_get_slot(mas, mas->offset); - if (mas_dead_node(mas, index)) + if (mas_is_active(mas) && mte_dead_node(mas->node)) { + mas_set(mas, index); goto retry; + } return entry; } #if defined(CONFIG_64BIT) -static noinline void check_erase2_testset(struct maple_tree *mt, - unsigned long *set, unsigned long size) +static noinline void __init check_erase2_testset(struct maple_tree *mt, + const unsigned long *set, unsigned long size) { int entry_count = 0; int check = 0; @@ -1017,7 +662,7 @@ static noinline void check_erase2_testset(struct maple_tree *mt, if (entry_count) MT_BUG_ON(mt, !mt_height(mt)); #if check_erase2_debug > 1 - mt_dump(mt); + mt_dump(mt, mt_dump_hex); #endif #if check_erase2_debug pr_err("Done\n"); @@ -1048,7 +693,7 @@ static noinline void check_erase2_testset(struct maple_tree *mt, mas_for_each(&mas, foo, ULONG_MAX) { if (xa_is_zero(foo)) { if (addr == mas.index) { - 
mt_dump(mas.tree); + mt_dump(mas.tree, mt_dump_hex); pr_err("retry failed %lu - %lu\n", mas.index, mas.last); MT_BUG_ON(mt, 1); @@ -1077,11 +722,11 @@ static noinline void check_erase2_testset(struct maple_tree *mt, /* These tests were pulled from KVM tree modifications which failed. */ -static noinline void check_erase2_sets(struct maple_tree *mt) +static noinline void __init check_erase2_sets(struct maple_tree *mt) { void *entry; unsigned long start = 0; - unsigned long set[] = { + static const unsigned long set[] = { STORE, 140737488347136, 140737488351231, STORE, 140721266458624, 140737488351231, ERASE, 140721266458624, 140737488351231, @@ -1099,7 +744,7 @@ ERASE, 140253902692352, 140253902864383, STORE, 140253902692352, 140253902696447, STORE, 140253902696448, 140253902864383, }; - unsigned long set2[] = { + static const unsigned long set2[] = { STORE, 140737488347136, 140737488351231, STORE, 140735933583360, 140737488351231, ERASE, 140735933583360, 140737488351231, @@ -1123,7 +768,7 @@ STORE, 140277094813696, 140277094821887, STORE, 140277094821888, 140277094825983, STORE, 140735933906944, 140735933911039, }; - unsigned long set3[] = { + static const unsigned long set3[] = { STORE, 140737488347136, 140737488351231, STORE, 140735790264320, 140737488351231, ERASE, 140735790264320, 140737488351231, @@ -1166,7 +811,7 @@ STORE, 47135835840512, 47135835885567, STORE, 47135835885568, 47135835893759, }; - unsigned long set4[] = { + static const unsigned long set4[] = { STORE, 140737488347136, 140737488351231, STORE, 140728251703296, 140737488351231, ERASE, 140728251703296, 140737488351231, @@ -1187,7 +832,7 @@ ERASE, 47646523277312, 47646523445247, STORE, 47646523277312, 47646523400191, }; - unsigned long set5[] = { + static const unsigned long set5[] = { STORE, 140737488347136, 140737488351231, STORE, 140726874062848, 140737488351231, ERASE, 140726874062848, 140737488351231, @@ -1320,7 +965,7 @@ STORE, 47884791619584, 47884791623679, STORE, 47884791623680, 47884791627775, }; - unsigned long set6[] = { + static const unsigned long set6[] = { STORE, 140737488347136, 140737488351231, STORE, 140722999021568, 140737488351231, ERASE, 140722999021568, 140737488351231, @@ -1452,7 +1097,7 @@ ERASE, 47430432014336, 47430432022527, STORE, 47430432014336, 47430432018431, STORE, 47430432018432, 47430432022527, }; - unsigned long set7[] = { + static const unsigned long set7[] = { STORE, 140737488347136, 140737488351231, STORE, 140729808330752, 140737488351231, ERASE, 140729808330752, 140737488351231, @@ -1584,7 +1229,7 @@ ERASE, 47439987130368, 47439987138559, STORE, 47439987130368, 47439987134463, STORE, 47439987134464, 47439987138559, }; - unsigned long set8[] = { + static const unsigned long set8[] = { STORE, 140737488347136, 140737488351231, STORE, 140722482974720, 140737488351231, ERASE, 140722482974720, 140737488351231, @@ -1717,7 +1362,7 @@ STORE, 47708488638464, 47708488642559, STORE, 47708488642560, 47708488646655, }; - unsigned long set9[] = { + static const unsigned long set9[] = { STORE, 140737488347136, 140737488351231, STORE, 140736427839488, 140737488351231, ERASE, 140736427839488, 140736427839488, @@ -5583,7 +5228,7 @@ ERASE, 47906195480576, 47906195480576, STORE, 94641242615808, 94641242750975, }; - unsigned long set10[] = { + static const unsigned long set10[] = { STORE, 140737488347136, 140737488351231, STORE, 140736427839488, 140737488351231, ERASE, 140736427839488, 140736427839488, @@ -9447,7 +9092,7 @@ STORE, 139726599680000, 139726599684095, ERASE, 47906195480576, 47906195480576, 
STORE, 94641242615808, 94641242750975, }; - unsigned long set11[] = { + static const unsigned long set11[] = { STORE, 140737488347136, 140737488351231, STORE, 140732658499584, 140737488351231, ERASE, 140732658499584, 140732658499584, @@ -9473,7 +9118,7 @@ STORE, 140732658565120, 140732658569215, STORE, 140732658552832, 140732658565119, }; - unsigned long set12[] = { /* contains 12 values. */ + static const unsigned long set12[] = { /* contains 12 values. */ STORE, 140737488347136, 140737488351231, STORE, 140732658499584, 140737488351231, ERASE, 140732658499584, 140732658499584, @@ -9500,7 +9145,7 @@ STORE, 140732658552832, 140732658565119, STORE, 140014592741375, 140014592741375, /* contrived */ STORE, 140014592733184, 140014592741376, /* creates first entry retry. */ }; - unsigned long set13[] = { + static const unsigned long set13[] = { STORE, 140373516247040, 140373516251135,/*: ffffa2e7b0e10d80 */ STORE, 140373516251136, 140373516255231,/*: ffffa2e7b1195d80 */ STORE, 140373516255232, 140373516443647,/*: ffffa2e7b0e109c0 */ @@ -9513,7 +9158,7 @@ STORE, 140373518684160, 140373518688254,/*: ffffa2e7b05fec00 */ STORE, 140373518688256, 140373518692351,/*: ffffa2e7bfbdcd80 */ STORE, 140373518692352, 140373518696447,/*: ffffa2e7b0749e40 */ }; - unsigned long set14[] = { + static const unsigned long set14[] = { STORE, 140737488347136, 140737488351231, STORE, 140731667996672, 140737488351231, SNULL, 140731668000767, 140737488351231, @@ -9797,7 +9442,7 @@ SNULL, 139826136543232, 139826136809471, STORE, 139826136809472, 139826136842239, STORE, 139826136543232, 139826136809471, }; - unsigned long set15[] = { + static const unsigned long set15[] = { STORE, 140737488347136, 140737488351231, STORE, 140722061451264, 140737488351231, SNULL, 140722061455359, 140737488351231, @@ -10082,7 +9727,7 @@ STORE, 139906808958976, 139906808991743, STORE, 139906808692736, 139906808958975, }; - unsigned long set16[] = { + static const unsigned long set16[] = { STORE, 94174808662016, 94174809321471, STORE, 94174811414528, 94174811426815, STORE, 94174811426816, 94174811430911, @@ -10293,7 +9938,7 @@ STORE, 139921865613312, 139921865617407, STORE, 139921865547776, 139921865564159, }; - unsigned long set17[] = { + static const unsigned long set17[] = { STORE, 94397057224704, 94397057646591, STORE, 94397057650688, 94397057691647, STORE, 94397057691648, 94397057695743, @@ -10355,7 +10000,7 @@ STORE, 140720477511680, 140720477646847, STORE, 140720478302208, 140720478314495, STORE, 140720478314496, 140720478318591, }; - unsigned long set18[] = { + static const unsigned long set18[] = { STORE, 140737488347136, 140737488351231, STORE, 140724953673728, 140737488351231, SNULL, 140724953677823, 140737488351231, @@ -10388,7 +10033,7 @@ STORE, 140222970597376, 140222970605567, ERASE, 140222970597376, 140222970605567, STORE, 140222970597376, 140222970605567, }; - unsigned long set19[] = { + static const unsigned long set19[] = { STORE, 140737488347136, 140737488351231, STORE, 140725182459904, 140737488351231, SNULL, 140725182463999, 140737488351231, @@ -10657,7 +10302,7 @@ STORE, 140656836775936, 140656836780031, STORE, 140656787476480, 140656791920639, ERASE, 140656774639616, 140656779083775, }; - unsigned long set20[] = { + static const unsigned long set20[] = { STORE, 140737488347136, 140737488351231, STORE, 140735952392192, 140737488351231, SNULL, 140735952396287, 140737488351231, @@ -10813,7 +10458,7 @@ STORE, 140590386819072, 140590386823167, STORE, 140590386823168, 140590386827263, SNULL, 140590376591359, 140590376595455, 
}; - unsigned long set21[] = { + static const unsigned long set21[] = { STORE, 93874710941696, 93874711363583, STORE, 93874711367680, 93874711408639, STORE, 93874711408640, 93874711412735, @@ -10883,7 +10528,7 @@ ERASE, 140708393312256, 140708393316351, ERASE, 140708393308160, 140708393312255, ERASE, 140708393291776, 140708393308159, }; - unsigned long set22[] = { + static const unsigned long set22[] = { STORE, 93951397134336, 93951397183487, STORE, 93951397183488, 93951397728255, STORE, 93951397728256, 93951397826559, @@ -11010,7 +10655,7 @@ STORE, 140551361253376, 140551361519615, ERASE, 140551361253376, 140551361519615, }; - unsigned long set23[] = { + static const unsigned long set23[] = { STORE, 94014447943680, 94014448156671, STORE, 94014450253824, 94014450257919, STORE, 94014450257920, 94014450266111, @@ -14334,7 +13979,7 @@ SNULL, 140175956627455, 140175985139711, STORE, 140175927242752, 140175956627455, STORE, 140175956627456, 140175985139711, }; - unsigned long set24[] = { + static const unsigned long set24[] = { STORE, 140737488347136, 140737488351231, STORE, 140735281639424, 140737488351231, SNULL, 140735281643519, 140737488351231, @@ -15496,7 +15141,7 @@ ERASE, 139635393024000, 139635401412607, ERASE, 139635384627200, 139635384631295, ERASE, 139635384631296, 139635393019903, }; - unsigned long set25[] = { + static const unsigned long set25[] = { STORE, 140737488347136, 140737488351231, STORE, 140737488343040, 140737488351231, STORE, 140722547441664, 140737488351231, @@ -22284,7 +21929,7 @@ STORE, 140249652703232, 140249682087935, STORE, 140249682087936, 140249710600191, }; - unsigned long set26[] = { + static const unsigned long set26[] = { STORE, 140737488347136, 140737488351231, STORE, 140729464770560, 140737488351231, SNULL, 140729464774655, 140737488351231, @@ -22308,7 +21953,7 @@ ERASE, 140109040951296, 140109040959487, STORE, 140109040955392, 140109040959487, ERASE, 140109040955392, 140109040959487, }; - unsigned long set27[] = { + static const unsigned long set27[] = { STORE, 140737488347136, 140737488351231, STORE, 140726128070656, 140737488351231, SNULL, 140726128074751, 140737488351231, @@ -22704,7 +22349,7 @@ STORE, 140415509696512, 140415535910911, ERASE, 140415537422336, 140415562588159, STORE, 140415482433536, 140415509696511, }; - unsigned long set28[] = { + static const unsigned long set28[] = { STORE, 140737488347136, 140737488351231, STORE, 140722475622400, 140737488351231, SNULL, 140722475626495, 140737488351231, @@ -22772,7 +22417,7 @@ STORE, 139918413348864, 139918413352959, ERASE, 139918413316096, 139918413344767, STORE, 93865848528896, 93865848664063, }; - unsigned long set29[] = { + static const unsigned long set29[] = { STORE, 140737488347136, 140737488351231, STORE, 140734467944448, 140737488351231, SNULL, 140734467948543, 140737488351231, @@ -23647,7 +23292,7 @@ ERASE, 140143079972864, 140143088361471, ERASE, 140143205793792, 140143205797887, ERASE, 140143205797888, 140143214186495, }; - unsigned long set30[] = { + static const unsigned long set30[] = { STORE, 140737488347136, 140737488351231, STORE, 140733436743680, 140737488351231, SNULL, 140733436747775, 140737488351231, @@ -24529,7 +24174,7 @@ ERASE, 140165225893888, 140165225897983, ERASE, 140165225897984, 140165234286591, ERASE, 140165058105344, 140165058109439, }; - unsigned long set31[] = { + static const unsigned long set31[] = { STORE, 140737488347136, 140737488351231, STORE, 140730890784768, 140737488351231, SNULL, 140730890788863, 140737488351231, @@ -25342,7 +24987,7 @@ ERASE, 
140623906590720, 140623914979327, ERASE, 140622950277120, 140622950281215, ERASE, 140622950281216, 140622958669823, }; - unsigned long set32[] = { + static const unsigned long set32[] = { STORE, 140737488347136, 140737488351231, STORE, 140731244212224, 140737488351231, SNULL, 140731244216319, 140737488351231, @@ -26138,7 +25783,7 @@ ERASE, 140400417288192, 140400425676799, ERASE, 140400283066368, 140400283070463, ERASE, 140400283070464, 140400291459071, }; - unsigned long set33[] = { + static const unsigned long set33[] = { STORE, 140737488347136, 140737488351231, STORE, 140734562918400, 140737488351231, SNULL, 140734562922495, 140737488351231, @@ -26280,7 +25925,7 @@ STORE, 140582961786880, 140583003750399, ERASE, 140582961786880, 140583003750399, }; - unsigned long set34[] = { + static const unsigned long set34[] = { STORE, 140737488347136, 140737488351231, STORE, 140731327180800, 140737488351231, SNULL, 140731327184895, 140737488351231, @@ -27161,7 +26806,7 @@ ERASE, 140012522094592, 140012530483199, ERASE, 140012033142784, 140012033146879, ERASE, 140012033146880, 140012041535487, }; - unsigned long set35[] = { + static const unsigned long set35[] = { STORE, 140737488347136, 140737488351231, STORE, 140730536939520, 140737488351231, SNULL, 140730536943615, 140737488351231, @@ -27918,7 +27563,7 @@ ERASE, 140474471936000, 140474480324607, ERASE, 140474396430336, 140474396434431, ERASE, 140474396434432, 140474404823039, }; - unsigned long set36[] = { + static const unsigned long set36[] = { STORE, 140737488347136, 140737488351231, STORE, 140723893125120, 140737488351231, SNULL, 140723893129215, 140737488351231, @@ -28779,7 +28424,7 @@ ERASE, 140121890357248, 140121898745855, ERASE, 140121269587968, 140121269592063, ERASE, 140121269592064, 140121277980671, }; - unsigned long set37[] = { + static const unsigned long set37[] = { STORE, 140737488347136, 140737488351231, STORE, 140722404016128, 140737488351231, SNULL, 140722404020223, 140737488351231, @@ -28905,7 +28550,7 @@ STORE, 139759821246464, 139759888355327, ERASE, 139759821246464, 139759888355327, ERASE, 139759888355328, 139759955464191, }; - unsigned long set38[] = { + static const unsigned long set38[] = { STORE, 140737488347136, 140737488351231, STORE, 140730666221568, 140737488351231, SNULL, 140730666225663, 140737488351231, @@ -29715,7 +29360,7 @@ ERASE, 140613504712704, 140613504716799, ERASE, 140613504716800, 140613513105407, }; - unsigned long set39[] = { + static const unsigned long set39[] = { STORE, 140737488347136, 140737488351231, STORE, 140736271417344, 140737488351231, SNULL, 140736271421439, 140737488351231, @@ -30087,7 +29732,7 @@ STORE, 140325364428800, 140325372821503, STORE, 140325356036096, 140325364428799, SNULL, 140325364432895, 140325372821503, }; - unsigned long set40[] = { + static const unsigned long set40[] = { STORE, 140737488347136, 140737488351231, STORE, 140734309167104, 140737488351231, SNULL, 140734309171199, 140737488351231, @@ -30838,7 +30483,7 @@ ERASE, 140320289300480, 140320289304575, ERASE, 140320289304576, 140320297693183, ERASE, 140320163409920, 140320163414015, }; - unsigned long set41[] = { + static const unsigned long set41[] = { STORE, 140737488347136, 140737488351231, STORE, 140728157171712, 140737488351231, SNULL, 140728157175807, 140737488351231, @@ -31148,7 +30793,7 @@ STORE, 94376135090176, 94376135094271, STORE, 94376135094272, 94376135098367, SNULL, 94376135094272, 94377208836095, }; - unsigned long set42[] = { + static const unsigned long set42[] = { STORE, 314572800, 1388314623, 
STORE, 1462157312, 1462169599, STORE, 1462169600, 1462185983, @@ -33825,7 +33470,7 @@ SNULL, 3798999040, 3799101439, */ }; - unsigned long set43[] = { + static const unsigned long set43[] = { STORE, 140737488347136, 140737488351231, STORE, 140734187720704, 140737488351231, SNULL, 140734187724800, 140737488351231, @@ -34088,7 +33733,7 @@ STORE, 140501948112896, 140501948116991, mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); check_erase2_testset(mt, set27, ARRAY_SIZE(set27)); rcu_barrier(); - MT_BUG_ON(mt, 0 != mtree_load(mt, 140415537422336)); + MT_BUG_ON(mt, NULL != mtree_load(mt, 140415537422336)); mt_set_non_kernel(0); mt_validate(mt); mtree_destroy(mt); @@ -34212,7 +33857,7 @@ STORE, 140501948112896, 140501948116991, mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); check_erase2_testset(mt, set37, ARRAY_SIZE(set37)); rcu_barrier(); - MT_BUG_ON(mt, 0 != mtree_load(mt, 94637033459712)); + MT_BUG_ON(mt, NULL != mtree_load(mt, 94637033459712)); mt_validate(mt); mtree_destroy(mt); @@ -34220,7 +33865,7 @@ STORE, 140501948112896, 140501948116991, mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); check_erase2_testset(mt, set38, ARRAY_SIZE(set38)); rcu_barrier(); - MT_BUG_ON(mt, 0 != mtree_load(mt, 94637033459712)); + MT_BUG_ON(mt, NULL != mtree_load(mt, 94637033459712)); mt_validate(mt); mtree_destroy(mt); @@ -34476,7 +34121,7 @@ static void *rcu_reader_rev(void *ptr) if (mas.index != r_start) { alt = xa_mk_value(index + i * 2 + 1 + RCU_RANGE_COUNT); - mt_dump(test->mt); + mt_dump(test->mt, mt_dump_dec); printk("Error: %lu-%lu %p != %lu-%lu %p %p line %d i %d\n", mas.index, mas.last, entry, r_start, r_end, expected, alt, @@ -34917,13 +34562,77 @@ void run_check_rcu(struct maple_tree *mt, struct rcu_test_struct *vals) MT_BUG_ON(mt, !vals->seen_entry2); } +static void *rcu_slot_store_reader(void *ptr) +{ + struct rcu_test_struct3 *test = ptr; + MA_STATE(mas, test->mt, test->index, test->index); + + rcu_register_thread(); + + rcu_read_lock(); + while (!test->stop) { + mas_walk(&mas); + /* The length of growth to both sides must be equal. */ + RCU_MT_BUG_ON(test, (test->index - mas.index) != + (mas.last - test->last)); + } + rcu_read_unlock(); + + rcu_unregister_thread(); + return NULL; +} + +static noinline void run_check_rcu_slot_store(struct maple_tree *mt) +{ + pthread_t readers[20]; + int range_cnt = 200, i, limit = 10000; + unsigned long len = ULONG_MAX / range_cnt, start, end; + struct rcu_test_struct3 test = {.stop = false, .mt = mt}; + + start = range_cnt / 2 * len; + end = start + len - 1; + test.index = start; + test.last = end; + + for (i = 0; i < range_cnt; i++) { + mtree_store_range(mt, i * len, i * len + len - 1, + xa_mk_value(i * 100), GFP_KERNEL); + } + + mt_set_in_rcu(mt); + MT_BUG_ON(mt, !mt_in_rcu(mt)); + + for (i = 0; i < ARRAY_SIZE(readers); i++) { + if (pthread_create(&readers[i], NULL, rcu_slot_store_reader, + &test)) { + perror("creating reader thread"); + exit(1); + } + } + + usleep(5); + + while (limit--) { + /* Step by step, expand the most middle range to both sides. 
*/ + mtree_store_range(mt, --start, ++end, xa_mk_value(100), + GFP_KERNEL); + } + + test.stop = true; + + while (i--) + pthread_join(readers[i], NULL); + + mt_validate(mt); +} + static noinline void run_check_rcu_slowread(struct maple_tree *mt, struct rcu_test_struct *vals) { int i; void *(*function)(void *); - pthread_t readers[20]; + pthread_t readers[30]; unsigned int index = vals->index; mt_set_in_rcu(mt); @@ -34941,14 +34650,14 @@ void run_check_rcu_slowread(struct maple_tree *mt, struct rcu_test_struct *vals) } } - usleep(5); /* small yield to ensure all threads are at least started. */ + usleep(3); /* small yield to ensure all threads are at least started. */ while (index <= vals->last) { mtree_store(mt, index, (index % 2 ? vals->entry2 : vals->entry3), GFP_KERNEL); index++; - usleep(5); + usleep(2); } while (i--) @@ -34959,7 +34668,8 @@ void run_check_rcu_slowread(struct maple_tree *mt, struct rcu_test_struct *vals) MT_BUG_ON(mt, !vals->seen_entry3); MT_BUG_ON(mt, !vals->seen_both); } -static noinline void check_rcu_simulated(struct maple_tree *mt) + +static noinline void __init check_rcu_simulated(struct maple_tree *mt) { unsigned long i, nr_entries = 1000; unsigned long target = 4320; @@ -35120,7 +34830,7 @@ static noinline void check_rcu_simulated(struct maple_tree *mt) rcu_unregister_thread(); } -static noinline void check_rcu_threaded(struct maple_tree *mt) +static noinline void __init check_rcu_threaded(struct maple_tree *mt) { unsigned long i, nr_entries = 1000; struct rcu_test_struct vals; @@ -35169,6 +34879,10 @@ static noinline void check_rcu_threaded(struct maple_tree *mt) run_check_rcu(mt, &vals); mtree_destroy(mt); + /* Check expanding range in RCU mode */ + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + run_check_rcu_slot_store(mt); + mtree_destroy(mt); /* Forward writer for rcu stress */ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); @@ -35222,8 +34936,9 @@ static void mas_dfs_preorder(struct ma_state *mas) struct maple_enode *prev; unsigned char end, slot = 0; + unsigned long *pivots; - if (mas->node == MAS_START) { + if (mas->status == ma_start) { mas_start(mas); return; } @@ -35254,10 +34969,13 @@ walk_up: mas_ascend(mas); goto walk_up; } + pivots = ma_pivots(mte_to_node(prev), mte_node_type(prev)); + mas->max = mas_safe_pivot(mas, pivots, slot, mte_node_type(prev)); + mas->min = mas_safe_min(mas, pivots, slot); return; done: - mas->node = MAS_NONE; + mas->status = ma_none; } @@ -35307,17 +35025,6 @@ static void check_dfs_preorder(struct maple_tree *mt) MT_BUG_ON(mt, count != e); mtree_destroy(mt); - mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); - mas_reset(&mas); - mt_zero_nr_tallocated(); - mt_set_non_kernel(200); - mas_expected_entries(&mas, max); - for (count = 0; count <= max; count++) { - mas.index = mas.last = count; - mas_store(&mas, xa_mk_value(count)); - MT_BUG_ON(mt, mas_is_err(&mas)); - } - mas_destroy(&mas); rcu_barrier(); /* * pr_info(" ->seq test of 0-%lu %luK in %d active (%d total)\n", @@ -35328,34 +35035,104 @@ static void check_dfs_preorder(struct maple_tree *mt) } /* End of depth first search tests */ +/* get height of the lowest non-leaf node with free space */ +static unsigned char get_vacant_height(struct ma_wr_state *wr_mas, void *entry) +{ + struct ma_state *mas = wr_mas->mas; + char vacant_height = 0; + enum maple_type type; + unsigned long *pivots; + unsigned long min = 0; + unsigned long max = ULONG_MAX; + unsigned char offset; + + /* start traversal */ + mas_reset(mas); + mas_start(mas); + if (!xa_is_node(mas_root(mas))) + return 0; + + type = 
mte_node_type(mas->node); + wr_mas->type = type; + while (!ma_is_leaf(type)) { + mas_node_walk(mas, mte_to_node(mas->node), type, &min, &max); + offset = mas->offset; + mas->end = mas_data_end(mas); + pivots = ma_pivots(mte_to_node(mas->node), type); + + if (pivots) { + if (offset) + min = pivots[mas->offset - 1]; + if (offset < mas->end) + max = pivots[mas->offset]; + } + wr_mas->r_max = offset < mas->end ? pivots[offset] : mas->max; + + /* detect spanning write */ + if (mas_is_span_wr(wr_mas)) + break; + + if (mas->end < mt_slot_count(mas->node) - 1) + vacant_height = mas->depth + 1; + + mas_descend(mas); + type = mte_node_type(mas->node); + mas->depth++; + } + + return vacant_height; +} + +static int mas_allocated(struct ma_state *mas) +{ + int total = 0; + + if (mas->alloc) + total++; + + if (mas->sheaf) + total += kmem_cache_sheaf_size(mas->sheaf); + + return total; +} /* Preallocation testing */ -static noinline void check_prealloc(struct maple_tree *mt) +static noinline void __init check_prealloc(struct maple_tree *mt) { unsigned long i, max = 100; unsigned long allocated; unsigned char height; + unsigned char vacant_height; struct maple_node *mn; void *ptr = check_prealloc; MA_STATE(mas, mt, 10, 20); + MA_WR_STATE(wr_mas, &mas, ptr); mt_set_non_kernel(1000); for (i = 0; i <= max; i++) mtree_test_store_range(mt, i * 10, i * 10 + 5, &i); - MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); + /* Spanning store */ + mas_set_range(&mas, 470, 500); + + mas_wr_preallocate(&wr_mas, ptr); + MT_BUG_ON(mt, mas.store_type != wr_spanning_store); + MT_BUG_ON(mt, mas_is_err(&mas)); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); + vacant_height = get_vacant_height(&wr_mas, ptr); MT_BUG_ON(mt, allocated == 0); - MT_BUG_ON(mt, allocated != 1 + height * 3); + MT_BUG_ON(mt, allocated != 1 + (height - vacant_height) * 3); mas_destroy(&mas); allocated = mas_allocated(&mas); MT_BUG_ON(mt, allocated != 0); + mas_wr_preallocate(&wr_mas, ptr); MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); + vacant_height = get_vacant_height(&wr_mas, ptr); MT_BUG_ON(mt, allocated == 0); - MT_BUG_ON(mt, allocated != 1 + height * 3); + MT_BUG_ON(mt, allocated != 1 + (height - vacant_height) * 3); MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); mas_destroy(&mas); allocated = mas_allocated(&mas); @@ -35365,10 +35142,11 @@ static noinline void check_prealloc(struct maple_tree *mt) MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); - MT_BUG_ON(mt, allocated == 0); - MT_BUG_ON(mt, allocated != 1 + height * 3); + vacant_height = get_vacant_height(&wr_mas, ptr); + MT_BUG_ON(mt, allocated != 1 + (height - vacant_height) * 3); mn = mas_pop_node(&mas); MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1); + mn->parent = ma_parent_ptr(mn); ma_free_rcu(mn); MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); mas_destroy(&mas); @@ -35378,60 +35156,53 @@ static noinline void check_prealloc(struct maple_tree *mt) MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); - MT_BUG_ON(mt, allocated == 0); - MT_BUG_ON(mt, allocated != 1 + height * 3); + vacant_height = get_vacant_height(&wr_mas, ptr); + MT_BUG_ON(mt, allocated != 1 + (height - vacant_height) * 3); mn = mas_pop_node(&mas); MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1); MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) 
!= 0); mas_destroy(&mas); allocated = mas_allocated(&mas); MT_BUG_ON(mt, allocated != 0); + mn->parent = ma_parent_ptr(mn); ma_free_rcu(mn); MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); - MT_BUG_ON(mt, allocated == 0); - MT_BUG_ON(mt, allocated != 1 + height * 3); - mn = mas_pop_node(&mas); - MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1); - mas_push_node(&mas, mn); - MT_BUG_ON(mt, mas_allocated(&mas) != allocated); - MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); - mas_destroy(&mas); - allocated = mas_allocated(&mas); - MT_BUG_ON(mt, allocated != 0); - - MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); - allocated = mas_allocated(&mas); - height = mas_mt_height(&mas); - MT_BUG_ON(mt, allocated == 0); - MT_BUG_ON(mt, allocated != 1 + height * 3); + vacant_height = get_vacant_height(&wr_mas, ptr); + MT_BUG_ON(mt, allocated != 1 + (height - vacant_height) * 3); mas_store_prealloc(&mas, ptr); MT_BUG_ON(mt, mas_allocated(&mas) != 0); + /* Slot store does not need allocations */ + mas_set_range(&mas, 6, 9); MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); - height = mas_mt_height(&mas); - MT_BUG_ON(mt, allocated == 0); - MT_BUG_ON(mt, allocated != 1 + height * 3); + MT_BUG_ON(mt, allocated != 0); mas_store_prealloc(&mas, ptr); MT_BUG_ON(mt, mas_allocated(&mas) != 0); + + mas_set_range(&mas, 6, 10); MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); - MT_BUG_ON(mt, allocated == 0); - MT_BUG_ON(mt, allocated != 1 + height * 3); + MT_BUG_ON(mt, allocated != 0); mas_store_prealloc(&mas, ptr); + MT_BUG_ON(mt, mas_allocated(&mas) != 0); + /* Split */ + mas_set_range(&mas, 54, 54); MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); - MT_BUG_ON(mt, allocated == 0); - MT_BUG_ON(mt, allocated != 1 + height * 3); + vacant_height = get_vacant_height(&wr_mas, ptr); + MT_BUG_ON(mt, allocated != 1 + (height - vacant_height) * 2); mas_store_prealloc(&mas, ptr); MT_BUG_ON(mt, mas_allocated(&mas) != 0); mt_set_non_kernel(1); + /* Spanning store */ + mas_set_range(&mas, 1, 100); MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL & GFP_NOWAIT) == 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); @@ -35439,23 +35210,43 @@ static noinline void check_prealloc(struct maple_tree *mt) mas_destroy(&mas); + /* Spanning store */ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); + vacant_height = get_vacant_height(&wr_mas, ptr); MT_BUG_ON(mt, allocated == 0); - MT_BUG_ON(mt, allocated != 1 + height * 3); + /* + * vacant height cannot be used to compute the number of nodes needed + * as the root contains two entries which means it is on the verge of + * insufficiency. The worst case full height of the tree is needed. 
+ */ + MT_BUG_ON(mt, allocated != height * 3 + 1); mas_store_prealloc(&mas, ptr); MT_BUG_ON(mt, mas_allocated(&mas) != 0); + mas_set_range(&mas, 0, 200); mt_set_non_kernel(1); MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL & GFP_NOWAIT) == 0); allocated = mas_allocated(&mas); height = mas_mt_height(&mas); MT_BUG_ON(mt, allocated != 0); + + /* Chaining multiple preallocations */ + mt_set_in_rcu(mt); + mas_set_range(&mas, 800, 805); /* Slot store, should be 0 allocations */ + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); + allocated = mas_allocated(&mas); + MT_BUG_ON(mt, allocated != 0); + mas.last = 809; /* Node store */ + MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0); + allocated = mas_allocated(&mas); + MT_BUG_ON(mt, allocated != 1); + mas_store_prealloc(&mas, ptr); } /* End of preallocation testing */ /* Spanning writes, writes that span nodes and layers of the tree */ -static noinline void check_spanning_write(struct maple_tree *mt) +static noinline void __init check_spanning_write(struct maple_tree *mt) { unsigned long i, max = 5000; MA_STATE(mas, mt, 1200, 2380); @@ -35623,7 +35414,7 @@ static noinline void check_spanning_write(struct maple_tree *mt) /* End of spanning write testing */ /* Writes to a NULL area that are adjacent to other NULLs */ -static noinline void check_null_expand(struct maple_tree *mt) +static noinline void __init check_null_expand(struct maple_tree *mt) { unsigned long i, max = 100; unsigned char data_end; @@ -35684,7 +35475,7 @@ static noinline void check_null_expand(struct maple_tree *mt) /* End of NULL area expansions */ /* Checking for no memory is best done outside the kernel */ -static noinline void check_nomem(struct maple_tree *mt) +static noinline void __init check_nomem(struct maple_tree *mt) { MA_STATE(ms, mt, 1, 1); @@ -35709,17 +35500,18 @@ static noinline void check_nomem(struct maple_tree *mt) mas_store(&ms, &ms); /* insert 1 -> &ms, fails. */ MT_BUG_ON(mt, ms.node != MA_ERROR(-ENOMEM)); mas_nomem(&ms, GFP_KERNEL); /* Node allocated in here. */ - MT_BUG_ON(mt, ms.node != MAS_START); + MT_BUG_ON(mt, ms.status != ma_start); mtree_unlock(mt); MT_BUG_ON(mt, mtree_insert(mt, 2, mt, GFP_KERNEL) != 0); mtree_lock(mt); mas_store(&ms, &ms); /* insert 1 -> &ms */ mas_nomem(&ms, GFP_KERNEL); /* Node allocated in here. */ mtree_unlock(mt); + mas_destroy(&ms); mtree_destroy(mt); } -static noinline void check_locky(struct maple_tree *mt) +static noinline void __init check_locky(struct maple_tree *mt) { MA_STATE(ms, mt, 2, 2); MA_STATE(reader, mt, 2, 2); @@ -35734,17 +35526,523 @@ static noinline void check_locky(struct maple_tree *mt) mt_clear_in_rcu(mt); } +/* + * Compares two nodes except for the addresses stored in the nodes. + * Returns zero if they are the same, otherwise returns non-zero. + */ +static int __init compare_node(struct maple_enode *enode_a, + struct maple_enode *enode_b) +{ + struct maple_node *node_a, *node_b; + struct maple_node a, b; + void **slots_a, **slots_b; /* Do not use the rcu tag. */ + enum maple_type type; + int i; + + if (((unsigned long)enode_a & MAPLE_NODE_MASK) != + ((unsigned long)enode_b & MAPLE_NODE_MASK)) { + pr_err("The lower 8 bits of enode are different.\n"); + return -1; + } + + type = mte_node_type(enode_a); + node_a = mte_to_node(enode_a); + node_b = mte_to_node(enode_b); + a = *node_a; + b = *node_b; + + /* Do not compare addresses. 
*/ + if (ma_is_root(node_a) || ma_is_root(node_b)) { + a.parent = (struct maple_pnode *)((unsigned long)a.parent & + MA_ROOT_PARENT); + b.parent = (struct maple_pnode *)((unsigned long)b.parent & + MA_ROOT_PARENT); + } else { + a.parent = (struct maple_pnode *)((unsigned long)a.parent & + MAPLE_NODE_MASK); + b.parent = (struct maple_pnode *)((unsigned long)b.parent & + MAPLE_NODE_MASK); + } + + if (a.parent != b.parent) { + pr_err("The lower 8 bits of parents are different. %p %p\n", + a.parent, b.parent); + return -1; + } + + /* + * If it is a leaf node, the slots do not contain the node address, and + * no special processing of slots is required. + */ + if (ma_is_leaf(type)) + goto cmp; + + slots_a = ma_slots(&a, type); + slots_b = ma_slots(&b, type); + + for (i = 0; i < mt_slots[type]; i++) { + if (!slots_a[i] && !slots_b[i]) + break; + + if (!slots_a[i] || !slots_b[i]) { + pr_err("The number of slots is different.\n"); + return -1; + } + + /* Do not compare addresses in slots. */ + ((unsigned long *)slots_a)[i] &= MAPLE_NODE_MASK; + ((unsigned long *)slots_b)[i] &= MAPLE_NODE_MASK; + } + +cmp: + /* + * Compare all contents of two nodes, including parent (except address), + * slots (except address), pivots, gaps and metadata. + */ + return memcmp(&a, &b, sizeof(struct maple_node)); +} + +/* + * Compare two trees and return 0 if they are the same, non-zero otherwise. + */ +static int __init compare_tree(struct maple_tree *mt_a, struct maple_tree *mt_b) +{ + MA_STATE(mas_a, mt_a, 0, 0); + MA_STATE(mas_b, mt_b, 0, 0); + + if (mt_a->ma_flags != mt_b->ma_flags) { + pr_err("The flags of the two trees are different.\n"); + return -1; + } + + mas_dfs_preorder(&mas_a); + mas_dfs_preorder(&mas_b); + + if (mas_is_ptr(&mas_a) || mas_is_ptr(&mas_b)) { + if (!(mas_is_ptr(&mas_a) && mas_is_ptr(&mas_b))) { + pr_err("One is ma_root and the other is not.\n"); + return -1; + } + return 0; + } + + while (!mas_is_none(&mas_a) || !mas_is_none(&mas_b)) { + + if (mas_is_none(&mas_a) || mas_is_none(&mas_b)) { + pr_err("One is ma_none and the other is not.\n"); + return -1; + } + + if (mas_a.min != mas_b.min || + mas_a.max != mas_b.max) { + pr_err("mas->min, mas->max do not match.\n"); + return -1; + } + + if (compare_node(mas_a.node, mas_b.node)) { + pr_err("The contents of nodes %p and %p are different.\n", + mas_a.node, mas_b.node); + mt_dump(mt_a, mt_dump_dec); + mt_dump(mt_b, mt_dump_dec); + return -1; + } + + mas_dfs_preorder(&mas_a); + mas_dfs_preorder(&mas_b); + } + + return 0; +} + +static __init void mas_subtree_max_range(struct ma_state *mas) +{ + unsigned long limit = mas->max; + MA_STATE(newmas, mas->tree, 0, 0); + void *entry; + + mas_for_each(mas, entry, limit) { + if (mas->last - mas->index >= + newmas.last - newmas.index) { + newmas = *mas; + } + } + + *mas = newmas; +} + +/* + * build_full_tree() - Build a full tree. + * @mt: The tree to build. + * @flags: Use @flags to build the tree. + * @height: The height of the tree to build. + * + * Build a tree with full leaf nodes and internal nodes. Note that the height + * should not exceed 3, otherwise it will take a long time to build. + * Return: zero if the build is successful, non-zero if it fails. 
+ */ +static __init int build_full_tree(struct maple_tree *mt, unsigned int flags, + int height) +{ + MA_STATE(mas, mt, 0, 0); + unsigned long step; + int ret = 0, cnt = 1; + enum maple_type type; + + mt_init_flags(mt, flags); + mtree_insert_range(mt, 0, ULONG_MAX, xa_mk_value(5), GFP_KERNEL); + + mtree_lock(mt); + + while (1) { + mas_set(&mas, 0); + if (mt_height(mt) < height) { + mas.max = ULONG_MAX; + goto store; + } + + while (1) { + mas_dfs_preorder(&mas); + if (mas_is_none(&mas)) + goto unlock; + + type = mte_node_type(mas.node); + if (mas_data_end(&mas) + 1 < mt_slots[type]) { + mas_set(&mas, mas.min); + goto store; + } + } +store: + mas_subtree_max_range(&mas); + step = mas.last - mas.index; + if (step < 1) { + ret = -1; + goto unlock; + } + + step /= 2; + mas.last = mas.index + step; + mas_store_gfp(&mas, xa_mk_value(5), + GFP_KERNEL); + ++cnt; + } +unlock: + mtree_unlock(mt); + + MT_BUG_ON(mt, mt_height(mt) != height); + /* pr_info("height:%u number of elements:%d\n", mt_height(mt), cnt); */ + return ret; +} + +static noinline void __init check_mtree_dup(struct maple_tree *mt) +{ + DEFINE_MTREE(new); + int i, j, ret, count = 0; + unsigned int rand_seed = 17, rand; + + /* store a value at [0, 0] */ + mt_init_flags(mt, 0); + mtree_store_range(mt, 0, 0, xa_mk_value(0), GFP_KERNEL); + ret = mtree_dup(mt, &new, GFP_KERNEL); + MT_BUG_ON(&new, ret); + mt_validate(&new); + if (compare_tree(mt, &new)) + MT_BUG_ON(&new, 1); + + mtree_destroy(mt); + mtree_destroy(&new); + + /* The two trees have different attributes. */ + mt_init_flags(mt, 0); + mt_init_flags(&new, MT_FLAGS_ALLOC_RANGE); + ret = mtree_dup(mt, &new, GFP_KERNEL); + MT_BUG_ON(&new, ret != -EINVAL); + mtree_destroy(mt); + mtree_destroy(&new); + + /* The new tree is not empty */ + mt_init_flags(mt, 0); + mt_init_flags(&new, 0); + mtree_store(&new, 5, xa_mk_value(5), GFP_KERNEL); + ret = mtree_dup(mt, &new, GFP_KERNEL); + MT_BUG_ON(&new, ret != -EINVAL); + mtree_destroy(mt); + mtree_destroy(&new); + + /* Test for duplicating full trees. */ + for (i = 1; i <= 3; i++) { + ret = build_full_tree(mt, 0, i); + MT_BUG_ON(mt, ret); + mt_init_flags(&new, 0); + + ret = mtree_dup(mt, &new, GFP_KERNEL); + MT_BUG_ON(&new, ret); + mt_validate(&new); + if (compare_tree(mt, &new)) + MT_BUG_ON(&new, 1); + + mtree_destroy(mt); + mtree_destroy(&new); + } + + for (i = 1; i <= 3; i++) { + ret = build_full_tree(mt, MT_FLAGS_ALLOC_RANGE, i); + MT_BUG_ON(mt, ret); + mt_init_flags(&new, MT_FLAGS_ALLOC_RANGE); + + ret = mtree_dup(mt, &new, GFP_KERNEL); + MT_BUG_ON(&new, ret); + mt_validate(&new); + if (compare_tree(mt, &new)) + MT_BUG_ON(&new, 1); + + mtree_destroy(mt); + mtree_destroy(&new); + } + + /* Test for normal duplicating. */ + for (i = 0; i < 1000; i += 3) { + if (i & 1) { + mt_init_flags(mt, 0); + mt_init_flags(&new, 0); + } else { + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + mt_init_flags(&new, MT_FLAGS_ALLOC_RANGE); + } + + for (j = 0; j < i; j++) { + mtree_store_range(mt, j * 10, j * 10 + 5, + xa_mk_value(j), GFP_KERNEL); + } + + ret = mtree_dup(mt, &new, GFP_KERNEL); + MT_BUG_ON(&new, ret); + mt_validate(&new); + if (compare_tree(mt, &new)) + MT_BUG_ON(&new, 1); + + mtree_destroy(mt); + mtree_destroy(&new); + } + + /* Test memory allocation failed. */ + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + for (i = 0; i < 30; i += 3) { + mtree_store_range(mt, j * 10, j * 10 + 5, + xa_mk_value(j), GFP_KERNEL); + } + + /* Failed at the first node. 
*/ + mt_init_flags(&new, MT_FLAGS_ALLOC_RANGE); + mt_set_non_kernel(0); + ret = mtree_dup(mt, &new, GFP_NOWAIT); + mt_set_non_kernel(0); + MT_BUG_ON(&new, ret != -ENOMEM); + mtree_destroy(mt); + mtree_destroy(&new); + + /* Random maple tree fails at a random node. */ + for (i = 0; i < 1000; i += 3) { + if (i & 1) { + mt_init_flags(mt, 0); + mt_init_flags(&new, 0); + } else { + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + mt_init_flags(&new, MT_FLAGS_ALLOC_RANGE); + } + + for (j = 0; j < i; j++) { + mtree_store_range(mt, j * 10, j * 10 + 5, + xa_mk_value(j), GFP_KERNEL); + } + /* + * The rand() library function is not used, so we can generate + * the same random numbers on any platform. + */ + rand_seed = rand_seed * 1103515245 + 12345; + rand = rand_seed / 65536 % 128; + mt_set_non_kernel(rand); + + ret = mtree_dup(mt, &new, GFP_NOWAIT); + mt_set_non_kernel(0); + if (ret != 0) { + MT_BUG_ON(&new, ret != -ENOMEM); + count++; + mtree_destroy(mt); + continue; + } + + mt_validate(&new); + if (compare_tree(mt, &new)) + MT_BUG_ON(&new, 1); + + mtree_destroy(mt); + mtree_destroy(&new); + } + + /* pr_info("mtree_dup() fail %d times\n", count); */ + BUG_ON(!count); +} + extern void test_kmem_cache_bulk(void); +static inline void check_spanning_store_height(struct maple_tree *mt) +{ + int index = 0; + int last = 140; + MA_STATE(mas, mt, 0, 0); + mas_lock(&mas); + while (mt_height(mt) != 3) { + mas_store_gfp(&mas, xa_mk_value(index), GFP_KERNEL); + mas_set(&mas, ++index); + } + + if (MAPLE_32BIT) + last = 155; /* 32 bit higher branching factor. */ + + mas_set_range(&mas, 90, last); + mas_store_gfp(&mas, xa_mk_value(index), GFP_KERNEL); + MT_BUG_ON(mt, mas_mt_height(&mas) != 2); + mas_unlock(&mas); +} + +/* + * Test to check the path of a spanning rebalance which results in + * a collapse where the rebalancing of the child node leads to + * insufficieny in the parent node. + */ +static void check_collapsing_rebalance(struct maple_tree *mt) +{ + int i = 0; + MA_STATE(mas, mt, ULONG_MAX, ULONG_MAX); + + /* create a height 6 tree */ + while (mt_height(mt) < 6) { + mtree_store_range(mt, i, i + 10, xa_mk_value(i), GFP_KERNEL); + i += 9; + } + + /* delete all entries one at a time, starting from the right */ + do { + mas_erase(&mas); + } while (mas_prev(&mas, 0) != NULL); + + mtree_unlock(mt); +} + +/* callback function used for check_nomem_writer_race() */ +static void writer2(void *maple_tree) +{ + struct maple_tree *mt = (struct maple_tree *)maple_tree; + MA_STATE(mas, mt, 6, 10); + + mtree_lock(mas.tree); + mas_store(&mas, xa_mk_value(0xC)); + mas_destroy(&mas); + mtree_unlock(mas.tree); +} + +/* + * check_nomem_writer_race() - test a possible race in the mas_nomem() path + * @mt: The tree to build. + * + * There is a possible race condition in low memory conditions when mas_nomem() + * gives up its lock. A second writer can chagne the entry that the primary + * writer executing the mas_nomem() path is modifying. This test recreates this + * scenario to ensure we are handling it correctly. 
+ */ +static void check_nomem_writer_race(struct maple_tree *mt) +{ + MA_STATE(mas, mt, 0, 5); + + mt_set_non_kernel(0); + /* setup root with 2 values with NULL in between */ + mtree_store_range(mt, 0, 5, xa_mk_value(0xA), GFP_KERNEL); + mtree_store_range(mt, 6, 10, NULL, GFP_KERNEL); + mtree_store_range(mt, 11, 15, xa_mk_value(0xB), GFP_KERNEL); + + /* setup writer 2 that will trigger the race condition */ + mt_set_private(mt); + mt_set_callback(writer2); + + mtree_lock(mt); + /* erase 0-5 */ + mas_erase(&mas); + + /* index 6-10 should retain the value from writer 2 */ + check_load(mt, 6, xa_mk_value(0xC)); + mtree_unlock(mt); + + mt_set_non_kernel(0); + /* test for the same race but with mas_store_gfp() */ + mtree_store_range(mt, 0, 5, xa_mk_value(0xA), GFP_KERNEL); + mtree_store_range(mt, 6, 10, NULL, GFP_KERNEL); + + mas_set_range(&mas, 0, 5); + + /* setup writer 2 that will trigger the race condition */ + mt_set_private(mt); + mt_set_callback(writer2); + + mtree_lock(mt); + mas_store_gfp(&mas, NULL, GFP_KERNEL); + + /* ensure write made by writer 2 is retained */ + check_load(mt, 6, xa_mk_value(0xC)); + + mt_set_private(NULL); + mt_set_callback(NULL); + mtree_unlock(mt); +} + + /* test to simulate expanding a vma from [0x7fffffffe000, 0x7ffffffff000) + * to [0x7ffde4ca1000, 0x7ffffffff000) and then shrinking the vma to + * [0x7ffde4ca1000, 0x7ffde4ca2000) + */ +static inline int check_vma_modification(struct maple_tree *mt) +{ +#if defined(CONFIG_64BIT) + MA_STATE(mas, mt, 0, 0); + + mtree_lock(mt); + /* vma with old start and old end */ + __mas_set_range(&mas, 0x7fffffffe000, 0x7ffffffff000 - 1); + mas_preallocate(&mas, xa_mk_value(1), GFP_KERNEL); + mas_store_prealloc(&mas, xa_mk_value(1)); + + /* next write occurs partly in previous range [0, 0x7fffffffe000)*/ + mas_prev_range(&mas, 0); + /* expand vma to {0x7ffde4ca1000, 0x7ffffffff000) */ + __mas_set_range(&mas, 0x7ffde4ca1000, 0x7ffffffff000 - 1); + mas_preallocate(&mas, xa_mk_value(1), GFP_KERNEL); + mas_store_prealloc(&mas, xa_mk_value(1)); + + /* shrink vma to [0x7ffde4ca1000, 7ffde4ca2000) */ + __mas_set_range(&mas, 0x7ffde4ca2000, 0x7ffffffff000 - 1); + mas_preallocate(&mas, NULL, GFP_KERNEL); + mas_store_prealloc(&mas, NULL); + mt_dump(mt, mt_dump_hex); + + mas_destroy(&mas); + mtree_unlock(mt); +#endif + + return 0; +} + + void farmer_tests(void) { struct maple_node *node; DEFINE_MTREE(tree); - mt_dump(&tree); + mt_dump(&tree, mt_dump_dec); + + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN | MT_FLAGS_USE_RCU); + check_vma_modification(&tree); + mtree_destroy(&tree); tree.ma_root = xa_mk_value(0); - mt_dump(&tree); + mt_dump(&tree, mt_dump_dec); node = mt_alloc_one(GFP_KERNEL); node->parent = (void *)((unsigned long)(&tree) | 1); @@ -35754,8 +36052,9 @@ void farmer_tests(void) node->mr64.pivot[1] = 1; node->mr64.pivot[2] = 0; tree.ma_root = mt_mk_node(node, maple_leaf_64); - mt_dump(&tree); + mt_dump(&tree, mt_dump_dec); + node->parent = ma_parent_ptr(node); ma_free_rcu(node); /* Check things that will make lockdep angry */ @@ -35768,6 +36067,10 @@ void farmer_tests(void) check_dfs_preorder(&tree); mtree_destroy(&tree); + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_USE_RCU); + check_nomem_writer_race(&tree); + mtree_destroy(&tree); + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); check_prealloc(&tree); mtree_destroy(&tree); @@ -35777,16 +36080,24 @@ void farmer_tests(void) mtree_destroy(&tree); mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + check_spanning_store_height(&tree); + mtree_destroy(&tree); 
+ + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + check_collapsing_rebalance(&tree); + mtree_destroy(&tree); + + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); check_null_expand(&tree); mtree_destroy(&tree); - /* RCU testing */ mt_init_flags(&tree, 0); - check_erase_testset(&tree); + check_mtree_dup(&tree); mtree_destroy(&tree); + /* RCU testing */ mt_init_flags(&tree, 0); - check_new_node(&tree); + check_erase_testset(&tree); mtree_destroy(&tree); if (!MAPLE_32BIT) { @@ -35812,9 +36123,95 @@ void farmer_tests(void) check_nomem(&tree); } +static unsigned long get_last_index(struct ma_state *mas) +{ + struct maple_node *node = mas_mn(mas); + enum maple_type mt = mte_node_type(mas->node); + unsigned long *pivots = ma_pivots(node, mt); + unsigned long last_index = mas_data_end(mas); + + BUG_ON(last_index == 0); + + return pivots[last_index - 1] + 1; +} + +/* + * Assert that we handle spanning stores that consume the entirety of the right + * leaf node correctly. + */ +static void test_spanning_store_regression(void) +{ + unsigned long from = 0, to = 0; + DEFINE_MTREE(tree); + MA_STATE(mas, &tree, 0, 0); + + /* + * Build a 3-level tree. We require a parent node below the root node + * and 2 leaf nodes under it, so we can span the entirety of the right + * hand node. + */ + build_full_tree(&tree, 0, 3); + + /* Descend into position at depth 2. */ + mas_reset(&mas); + mas_start(&mas); + mas_descend(&mas); + mas_descend(&mas); + + /* + * We need to establish a tree like the below. + * + * Then we can try a store in [from, to] which results in a spanned + * store across nodes B and C, with the maple state at the time of the + * write being such that only the subtree at A and below is considered. + * + * Height + * 0 Root Node + * / \ + * pivot = to / \ pivot = ULONG_MAX + * / \ + * 1 A [-----] ... + * / \ + * pivot = from / \ pivot = to + * / \ + * 2 (LEAVES) B [-----] [-----] C + * ^--- Last pivot to. + */ + while (true) { + unsigned long tmp = get_last_index(&mas); + + if (mas_next_sibling(&mas)) { + from = tmp; + to = mas.max; + } else { + break; + } + } + + BUG_ON(from == 0 && to == 0); + + /* Perform the store. */ + mas_set_range(&mas, from, to); + mas_store_gfp(&mas, xa_mk_value(0xdead), GFP_KERNEL); + + /* If the regression occurs, the validation will fail. */ + mt_validate(&tree); + + /* Cleanup. */ + __mt_destroy(&tree); +} + +static void regression_tests(void) +{ + test_spanning_store_regression(); +} + void maple_tree_tests(void) { +#if !defined(BENCH) + regression_tests(); farmer_tests(); +#endif maple_tree_seed(); maple_tree_harvest(); } |
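
A note on the reworked assertions in check_prealloc() above: the expected allocation count is now tied to the tree height minus the "vacant height" returned by get_vacant_height(), since levels at or below the lowest node with a free slot cannot split and need no spare nodes. A worked instance of the formula, with assumed numbers (the factor of three per level is taken from the test's own assertion, not derived here):

	/* Sketch: evaluate the check_prealloc() expectation
	 *	allocated == 1 + (height - vacant_height) * 3
	 * for an assumed height-3 tree whose lowest node with a
	 * free slot sits at depth 1.
	 */
	static unsigned long prealloc_expected(unsigned char height,
					       unsigned char vacant_height)
	{
		return 1 + (height - vacant_height) * 3;
	}
	/* prealloc_expected(3, 1) == 7: two levels may split, three
	 * nodes per splitting level, plus one spare.
	 */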
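
check_mtree_dup() above exercises the new mtree_dup() API, which duplicates an entire tree in one call; per the test's assertions it fails with -EINVAL when the destination has different flags or is not empty, and with -ENOMEM when an allocation fails mid-copy. A minimal in-kernel usage sketch, assuming only the calls the test itself makes:

	#include <linux/maple_tree.h>
	#include <linux/xarray.h>	/* xa_mk_value() */

	static int dup_example(void)
	{
		DEFINE_MTREE(src);	/* both trees use the same (default) flags */
		DEFINE_MTREE(dst);	/* destination must also be empty */
		int ret;

		mtree_store_range(&src, 0, 5, xa_mk_value(1), GFP_KERNEL);

		ret = mtree_dup(&src, &dst, GFP_KERNEL); /* 0, -EINVAL or -ENOMEM */
		if (ret)
			return ret;

		mtree_destroy(&src);
		mtree_destroy(&dst);
		return 0;
	}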
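
The allocation-failure loop in check_mtree_dup() seeds its fault injection from a hand-rolled linear congruential generator instead of rand(), so the same node fails on every platform and every run. The constants are the ones from the C standard's sample rand() implementation; restated as a helper:

	/* Same recurrence as in check_mtree_dup() above. */
	static unsigned int rand_seed = 17;

	static unsigned int next_fail_point(void)
	{
		rand_seed = rand_seed * 1103515245 + 12345;	/* ANSI C LCG step */
		return rand_seed / 65536 % 128;	/* discard low bits; range [0, 127] */
	}

Each value feeds mt_set_non_kernel(), limiting how many allocations may succeed before mtree_dup() is forced down the -ENOMEM path.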
