Diffstat (limited to 'lib/maple_tree.c')
-rw-r--r--  lib/maple_tree.c | 3733
1 file changed, 1868 insertions(+), 1865 deletions(-)
diff --git a/lib/maple_tree.c b/lib/maple_tree.c index 4dd73cf936a6..5aa4c9500018 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -4,6 +4,8 @@ * Copyright (c) 2018-2022 Oracle Corporation * Authors: Liam R. Howlett <Liam.Howlett@oracle.com> * Matthew Wilcox <willy@infradead.org> + * Copyright (c) 2023 ByteDance + * Author: Peng Zhang <zhangpeng.00@bytedance.com> */ /* @@ -14,8 +16,8 @@ * and are simply the slot index + the minimum of the node. * * In regular B-Tree terms, pivots are called keys. The term pivot is used to - * indicate that the tree is specifying ranges, Pivots may appear in the - * subtree with an entry attached to the value where as keys are unique to a + * indicate that the tree is specifying ranges. Pivots may appear in the + * subtree with an entry attached to the value whereas keys are unique to a * specific position of a B-tree. Pivot values are inclusive of the slot with * the same index. * @@ -62,19 +64,33 @@ #define CREATE_TRACE_POINTS #include <trace/events/maple_tree.h> +#define TP_FCT tracepoint_string(__func__) + +/* + * Kernel pointer hashing renders much of the maple tree dump useless as tagged + * pointers get hashed to arbitrary values. + * + * If CONFIG_DEBUG_VM_MAPLE_TREE is set we are in a debug mode where it is + * permissible to bypass this. Otherwise remain cautious and retain the hashing. + * + * Userland doesn't know about %px so also use %p there. + */ +#if defined(__KERNEL__) && defined(CONFIG_DEBUG_VM_MAPLE_TREE) +#define PTR_FMT "%px" +#else +#define PTR_FMT "%p" +#endif + #define MA_ROOT_PARENT 1 /* * Maple state flags - * * MA_STATE_BULK - Bulk insert mode - * * MA_STATE_REBALANCE - Indicate a rebalance during bulk insert * * MA_STATE_PREALLOC - Preallocated nodes, WARN_ON allocation */ -#define MA_STATE_BULK 1 -#define MA_STATE_REBALANCE 2 -#define MA_STATE_PREALLOC 4 +#define MA_STATE_PREALLOC 1 #define ma_parent_ptr(x) ((struct maple_pnode *)(x)) +#define mas_tree_parent(x) ((unsigned long)(x->tree) | MA_ROOT_PARENT) #define ma_mnode_ptr(x) ((struct maple_node *)(x)) #define ma_enode_ptr(x) ((struct maple_enode *)(x)) static struct kmem_cache *maple_node_cache; @@ -117,7 +133,6 @@ static const unsigned char mt_min_slots[] = { #define MAPLE_BIG_NODE_GAPS (MAPLE_ARANGE64_SLOTS * 2 + 1) struct maple_big_node { - struct maple_pnode *parent; unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1]; union { struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS]; @@ -159,21 +174,25 @@ static inline struct maple_node *mt_alloc_one(gfp_t gfp) return kmem_cache_alloc(maple_node_cache, gfp); } -static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes) +static inline void mt_free_bulk(size_t size, void __rcu **nodes) { - return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes); + kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes); } -static inline void mt_free_bulk(size_t size, void __rcu **nodes) +static void mt_return_sheaf(struct slab_sheaf *sheaf) { - kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes); + kmem_cache_return_sheaf(maple_node_cache, GFP_NOWAIT, sheaf); } -static void mt_free_rcu(struct rcu_head *head) +static struct slab_sheaf *mt_get_sheaf(gfp_t gfp, int count) { - struct maple_node *node = container_of(head, struct maple_node, rcu); + return kmem_cache_prefill_sheaf(maple_node_cache, gfp, count); +} - kmem_cache_free(maple_node_cache, node); +static int mt_refill_sheaf(gfp_t gfp, struct slab_sheaf **sheaf, + unsigned int size) +{ + return kmem_cache_refill_sheaf(maple_node_cache, gfp, sheaf, size); } /* 
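The first large change above replaces the bulk allocator (mt_alloc_bulk()) with thin wrappers around slab sheaves: a batch of nodes is prefilled up front so later pops cannot fail. Below is a minimal userspace model of that lifecycle; every toy_* name and the fixed capacity are illustrative stand-ins, not the kernel slab API.

```c
/*
 * Userspace model of the prefilled-sheaf pattern: reserve a batch of
 * objects up front, pop them infallibly later, return any leftovers.
 * All toy_* names are hypothetical; only the shape mirrors the hunk above.
 */
#include <stdlib.h>

#define TOY_SHEAF_CAP 16

struct toy_sheaf {
	size_t count;			/* objects still available */
	void *objs[TOY_SHEAF_CAP];	/* prefilled objects */
};

/* Like kmem_cache_prefill_sheaf(): all allocation failure happens here. */
static struct toy_sheaf *toy_get_sheaf(size_t count, size_t obj_size)
{
	struct toy_sheaf *s;

	if (count > TOY_SHEAF_CAP)
		return NULL;
	s = calloc(1, sizeof(*s));
	if (!s)
		return NULL;
	while (s->count < count) {
		void *obj = calloc(1, obj_size);

		if (!obj) {		/* undo a partial prefill */
			while (s->count)
				free(s->objs[--s->count]);
			free(s);
			return NULL;
		}
		s->objs[s->count++] = obj;
	}
	return s;
}

/* Like kmem_cache_alloc_from_sheaf(): cannot fail while objects remain. */
static void *toy_alloc_from_sheaf(struct toy_sheaf *s)
{
	return s->count ? s->objs[--s->count] : NULL;
}

/* Like kmem_cache_return_sheaf(): hand back whatever was not used. */
static void toy_return_sheaf(struct toy_sheaf *s)
{
	while (s->count)
		free(s->objs[--s->count]);
	free(s);
}
```

The design point mirrored here is that all allocation failure is concentrated in the prefill step, so the write paths that later pop nodes need no error handling of their own.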
@@ -186,17 +205,17 @@ static void mt_free_rcu(struct rcu_head *head) static void ma_free_rcu(struct maple_node *node) { WARN_ON(node->parent != ma_parent_ptr(node)); - call_rcu(&node->rcu, mt_free_rcu); + kfree_rcu(node, rcu); } -static void mas_set_height(struct ma_state *mas) +static void mt_set_height(struct maple_tree *mt, unsigned char height) { - unsigned int new_flags = mas->tree->ma_flags; + unsigned int new_flags = mt->ma_flags; new_flags &= ~MT_FLAGS_HEIGHT_MASK; - MAS_BUG_ON(mas, mas->depth > MAPLE_HEIGHT_MAX); - new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET; - mas->tree->ma_flags = new_flags; + MT_BUG_ON(mt, height > MAPLE_HEIGHT_MAX); + new_flags |= height << MT_FLAGS_HEIGHT_OFFSET; + mt->ma_flags = new_flags; } static unsigned int mas_mt_height(struct ma_state *mas) @@ -204,23 +223,29 @@ static unsigned int mas_mt_height(struct ma_state *mas) return mt_height(mas->tree); } -static inline enum maple_type mte_node_type(const struct maple_enode *entry) +static inline unsigned int mt_attr(struct maple_tree *mt) +{ + return mt->ma_flags & ~MT_FLAGS_HEIGHT_MASK; +} + +static __always_inline enum maple_type mte_node_type( + const struct maple_enode *entry) { return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) & MAPLE_NODE_TYPE_MASK; } -static inline bool ma_is_dense(const enum maple_type type) +static __always_inline bool ma_is_dense(const enum maple_type type) { return type < maple_leaf_64; } -static inline bool ma_is_leaf(const enum maple_type type) +static __always_inline bool ma_is_leaf(const enum maple_type type) { return type < maple_range_64; } -static inline bool mte_is_leaf(const struct maple_enode *entry) +static __always_inline bool mte_is_leaf(const struct maple_enode *entry) { return ma_is_leaf(mte_node_type(entry)); } @@ -229,44 +254,50 @@ static inline bool mte_is_leaf(const struct maple_enode *entry) * We also reserve values with the bottom two bits set to '10' which are * below 4096 */ -static inline bool mt_is_reserved(const void *entry) +static __always_inline bool mt_is_reserved(const void *entry) { return ((unsigned long)entry < MAPLE_RESERVED_RANGE) && xa_is_internal(entry); } -static inline void mas_set_err(struct ma_state *mas, long err) +static __always_inline void mas_set_err(struct ma_state *mas, long err) { mas->node = MA_ERROR(err); + mas->status = ma_error; } -static inline bool mas_is_ptr(const struct ma_state *mas) +static __always_inline bool mas_is_ptr(const struct ma_state *mas) { - return mas->node == MAS_ROOT; + return mas->status == ma_root; } -static inline bool mas_is_start(const struct ma_state *mas) +static __always_inline bool mas_is_start(const struct ma_state *mas) { - return mas->node == MAS_START; + return mas->status == ma_start; } -bool mas_is_err(struct ma_state *mas) +static __always_inline bool mas_is_none(const struct ma_state *mas) { - return xa_is_err(mas->node); + return mas->status == ma_none; } -static inline bool mas_searchable(struct ma_state *mas) +static __always_inline bool mas_is_paused(const struct ma_state *mas) { - if (mas_is_none(mas)) - return false; + return mas->status == ma_pause; +} - if (mas_is_ptr(mas)) - return false; +static __always_inline bool mas_is_overflow(struct ma_state *mas) +{ + return mas->status == ma_overflow; +} - return true; +static inline bool mas_is_underflow(struct ma_state *mas) +{ + return mas->status == ma_underflow; } -static inline struct maple_node *mte_to_node(const struct maple_enode *entry) +static __always_inline struct maple_node *mte_to_node( + const struct maple_enode 
*entry) { return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK); } @@ -328,27 +359,27 @@ static inline void *mte_safe_root(const struct maple_enode *node) return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE); } -static inline void *mte_set_full(const struct maple_enode *node) +static inline void __maybe_unused *mte_set_full(const struct maple_enode *node) { return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL); } -static inline void *mte_clear_full(const struct maple_enode *node) +static inline void __maybe_unused *mte_clear_full(const struct maple_enode *node) { return (void *)((unsigned long)node | MAPLE_ENODE_NULL); } -static inline bool mte_has_null(const struct maple_enode *node) +static inline bool __maybe_unused mte_has_null(const struct maple_enode *node) { return (unsigned long)node & MAPLE_ENODE_NULL; } -static inline bool ma_is_root(struct maple_node *node) +static __always_inline bool ma_is_root(struct maple_node *node) { return ((unsigned long)node->parent & MA_ROOT_PARENT); } -static inline bool mte_is_root(const struct maple_enode *node) +static __always_inline bool mte_is_root(const struct maple_enode *node) { return ma_is_root(mte_to_node(node)); } @@ -358,7 +389,7 @@ static inline bool mas_is_root_limits(const struct ma_state *mas) return !mas->min && mas->max == ULONG_MAX; } -static inline bool mt_is_alloc(struct maple_tree *mt) +static __always_inline bool mt_is_alloc(struct maple_tree *mt) { return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE); } @@ -371,11 +402,11 @@ static inline bool mt_is_alloc(struct maple_tree *mt) * a reuse of the last bit in the node type. This is possible by using bit 1 to * indicate if bit 2 is part of the type or the slot. * - * Note types: - * 0x??1 = Root - * 0x?00 = 16 bit nodes - * 0x010 = 32 bit nodes - * 0x110 = 64 bit nodes + * Node types: + * 0b??1 = Root + * 0b?00 = 16 bit nodes + * 0b010 = 32 bit nodes + * 0b110 = 64 bit nodes * * Slot size and alignment * 0b??1 : Root @@ -393,7 +424,7 @@ static inline bool mt_is_alloc(struct maple_tree *mt) #define MAPLE_PARENT_16B_SLOT_MASK 0xFC #define MAPLE_PARENT_RANGE64 0x06 -#define MAPLE_PARENT_RANGE32 0x04 +#define MAPLE_PARENT_RANGE32 0x02 #define MAPLE_PARENT_NOT_RANGE16 0x02 /* @@ -454,6 +485,7 @@ enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode) /* * mas_set_parent() - Set the parent node and encode the slot + * @mas: The maple state * @enode: The encoded maple node. * @parent: The encoded maple node that is the parent of @enode. * @slot: The slot that @enode resides in @parent. @@ -497,11 +529,12 @@ void mas_set_parent(struct ma_state *mas, struct maple_enode *enode, * * Return: The slot in the parent node where @enode resides. */ -static inline unsigned int mte_parent_slot(const struct maple_enode *enode) +static __always_inline +unsigned int mte_parent_slot(const struct maple_enode *enode) { unsigned long val = (unsigned long)mte_to_node(enode)->parent; - if (val & MA_ROOT_PARENT) + if (unlikely(val & MA_ROOT_PARENT)) return 0; /* @@ -513,11 +546,12 @@ static inline unsigned int mte_parent_slot(const struct maple_enode *enode) /* * mte_parent() - Get the parent of @node. - * @node: The encoded maple node. + * @enode: The encoded maple node. * * Return: The parent maple node. 
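The parent-pointer scheme documented in this hunk packs a node-type tag and the parent slot into the low bits of an aligned pointer. Here is a self-contained sketch of that style of encoding, with simplified masks that only assume 256-byte node alignment; they are deliberately not the MAPLE_PARENT_* values above.

```c
/* Toy tagged parent pointer: 256-byte-aligned nodes leave 8 low bits free.
 * Tag 0b110 marks a 64-bit range node; the slot sits above the tag.
 * Masks are illustrative, not the MAPLE_PARENT_* constants in this file. */
#include <assert.h>
#include <stdint.h>

#define TOY_NODE_MASK	0xFFUL	/* low byte is free for metadata */
#define TOY_RANGE64	0x06UL	/* 0b110 type tag */
#define TOY_SLOT_SHIFT	3	/* slot occupies bits 3..7 */

static uintptr_t toy_encode_parent(void *parent, unsigned int slot)
{
	return (uintptr_t)parent | ((uintptr_t)slot << TOY_SLOT_SHIFT) |
	       TOY_RANGE64;
}

static unsigned int toy_parent_slot(uintptr_t enc)
{
	return (enc & TOY_NODE_MASK) >> TOY_SLOT_SHIFT;
}

static void *toy_parent(uintptr_t enc)
{
	return (void *)(enc & ~TOY_NODE_MASK);
}

int main(void)
{
	static _Alignas(256) unsigned char node[256];
	uintptr_t enc = toy_encode_parent(node, 4);

	assert(toy_parent(enc) == (void *)node);
	assert(toy_parent_slot(enc) == 4);
	return 0;
}
```

Storing the slot beside the type tag is what lets helpers like mte_parent_slot() and mte_parent() recover both pieces from a single word without dereferencing the parent node.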
*/ -static inline struct maple_node *mte_parent(const struct maple_enode *enode) +static __always_inline +struct maple_node *mte_parent(const struct maple_enode *enode) { return (void *)((unsigned long) (mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK); @@ -529,7 +563,7 @@ static inline struct maple_node *mte_parent(const struct maple_enode *enode) * * Return: true if dead, false otherwise. */ -static inline bool ma_dead_node(const struct maple_node *node) +static __always_inline bool ma_dead_node(const struct maple_node *node) { struct maple_node *parent; @@ -545,82 +579,18 @@ static inline bool ma_dead_node(const struct maple_node *node) * * Return: true if dead, false otherwise. */ -static inline bool mte_dead_node(const struct maple_enode *enode) +static __always_inline bool mte_dead_node(const struct maple_enode *enode) { - struct maple_node *parent, *node; + struct maple_node *node; node = mte_to_node(enode); - /* Do not reorder reads from the node prior to the parent check */ - smp_rmb(); - parent = mte_parent(enode); - return (parent == node); -} - -/* - * mas_allocated() - Get the number of nodes allocated in a maple state. - * @mas: The maple state - * - * The ma_state alloc member is overloaded to hold a pointer to the first - * allocated node or to the number of requested nodes to allocate. If bit 0 is - * set, then the alloc contains the number of requested nodes. If there is an - * allocated node, then the total allocated nodes is in that node. - * - * Return: The total number of nodes allocated - */ -static inline unsigned long mas_allocated(const struct ma_state *mas) -{ - if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) - return 0; - - return mas->alloc->total; -} - -/* - * mas_set_alloc_req() - Set the requested number of allocations. - * @mas: the maple state - * @count: the number of allocations. - * - * The requested number of allocations is either in the first allocated node, - * located in @mas->alloc->request_count, or directly in @mas->alloc if there is - * no allocated node. Set the request either in the node or do the necessary - * encoding to store in @mas->alloc directly. - */ -static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count) -{ - if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) { - if (!count) - mas->alloc = NULL; - else - mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U); - return; - } - - mas->alloc->request_count = count; -} - -/* - * mas_alloc_req() - get the requested number of allocations. - * @mas: The maple state - * - * The alloc count is either stored directly in @mas, or in - * @mas->alloc->request_count if there is at least one node allocated. Decode - * the request count if it's stored directly in @mas->alloc. - * - * Return: The allocation request count. - */ -static inline unsigned int mas_alloc_req(const struct ma_state *mas) -{ - if ((unsigned long)mas->alloc & 0x1) - return (unsigned long)(mas->alloc) >> 1; - else if (mas->alloc) - return mas->alloc->request_count; - return 0; + return ma_dead_node(node); } /* * ma_pivots() - Get a pointer to the maple node pivots. - * @node - the maple node - * @type - the node type + * @node: the maple node + * @type: the node type * * In the event of a dead node, this array may be %NULL * @@ -643,8 +613,8 @@ static inline unsigned long *ma_pivots(struct maple_node *node, /* * ma_gaps() - Get a pointer to the maple node gaps. 
- * @node - the maple node - * @type - the node type + * @node: the maple node + * @type: the node type * * Return: A pointer to the maple node gaps */ @@ -663,35 +633,6 @@ static inline unsigned long *ma_gaps(struct maple_node *node, } /* - * mas_pivot() - Get the pivot at @piv of the maple encoded node. - * @mas: The maple state. - * @piv: The pivot. - * - * Return: the pivot at @piv of @mn. - */ -static inline unsigned long mas_pivot(struct ma_state *mas, unsigned char piv) -{ - struct maple_node *node = mas_mn(mas); - enum maple_type type = mte_node_type(mas->node); - - if (MAS_WARN_ON(mas, piv >= mt_pivots[type])) { - mas_set_err(mas, -EIO); - return 0; - } - - switch (type) { - case maple_arange_64: - return node->ma64.pivot[piv]; - case maple_range_64: - case maple_leaf_64: - return node->mr64.pivot[piv]; - case maple_dense: - return 0; - } - return 0; -} - -/* * mas_safe_pivot() - get the pivot at @piv or mas->max. * @mas: The maple state * @pivots: The pointer to the maple node pivots @@ -701,7 +642,7 @@ static inline unsigned long mas_pivot(struct ma_state *mas, unsigned char piv) * Return: The pivot at @piv within the limit of the @pivots array, @mas->max * otherwise. */ -static inline unsigned long +static __always_inline unsigned long mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots, unsigned char piv, enum maple_type type) { @@ -729,33 +670,6 @@ mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset) } /* - * mas_logical_pivot() - Get the logical pivot of a given offset. - * @mas: The maple state - * @pivots: The pointer to the maple node pivots - * @offset: The offset into the pivot array - * @type: The maple node type - * - * When there is no value at a pivot (beyond the end of the data), then the - * pivot is actually @mas->max. - * - * Return: the logical pivot of a given @offset. - */ -static inline unsigned long -mas_logical_pivot(struct ma_state *mas, unsigned long *pivots, - unsigned char offset, enum maple_type type) -{ - unsigned long lpiv = mas_safe_pivot(mas, pivots, offset, type); - - if (likely(lpiv)) - return lpiv; - - if (likely(offset)) - return mas->max; - - return lpiv; -} - -/* * mte_set_pivot() - Set a pivot to a value in an encoded maple node. * @mn: The encoded maple node * @piv: The pivot offset @@ -769,7 +683,6 @@ static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv, BUG_ON(piv >= mt_pivots[type]); switch (type) { - default: case maple_range_64: case maple_leaf_64: node->mr64.pivot[piv] = val; @@ -793,7 +706,6 @@ static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv, static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt) { switch (mt) { - default: case maple_arange_64: return mn->ma64.slot; case maple_range_64: @@ -802,24 +714,32 @@ static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt) case maple_dense: return mn->slot; } + + return NULL; } -static inline bool mt_locked(const struct maple_tree *mt) +static inline bool mt_write_locked(const struct maple_tree *mt) +{ + return mt_external_lock(mt) ? mt_write_lock_is_held(mt) : + lockdep_is_held(&mt->ma_lock); +} + +static __always_inline bool mt_locked(const struct maple_tree *mt) { return mt_external_lock(mt) ? 
mt_lock_is_held(mt) : lockdep_is_held(&mt->ma_lock); } -static inline void *mt_slot(const struct maple_tree *mt, +static __always_inline void *mt_slot(const struct maple_tree *mt, void __rcu **slots, unsigned char offset) { return rcu_dereference_check(slots[offset], mt_locked(mt)); } -static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots, - unsigned char offset) +static __always_inline void *mt_slot_locked(struct maple_tree *mt, + void __rcu **slots, unsigned char offset) { - return rcu_dereference_protected(slots[offset], mt_locked(mt)); + return rcu_dereference_protected(slots[offset], mt_write_locked(mt)); } /* * mas_slot_locked() - Get the slot value when holding the maple tree lock. @@ -829,8 +749,8 @@ static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots, * * Return: The entry stored in @slots at the @offset. */ -static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots, - unsigned char offset) +static __always_inline void *mas_slot_locked(struct ma_state *mas, + void __rcu **slots, unsigned char offset) { return mt_slot_locked(mas->tree, slots, offset); } @@ -843,8 +763,8 @@ static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots, * * Return: The entry stored in @slots at the @offset */ -static inline void *mas_slot(struct ma_state *mas, void __rcu **slots, - unsigned char offset) +static __always_inline void *mas_slot(struct ma_state *mas, void __rcu **slots, + unsigned char offset) { return mt_slot(mas->tree, slots, offset); } @@ -855,14 +775,14 @@ static inline void *mas_slot(struct ma_state *mas, void __rcu **slots, * * Return: The pointer to the root of the tree */ -static inline void *mas_root(struct ma_state *mas) +static __always_inline void *mas_root(struct ma_state *mas) { return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree)); } static inline void *mt_root_locked(struct maple_tree *mt) { - return rcu_dereference_protected(mt->ma_root, mt_locked(mt)); + return rcu_dereference_protected(mt->ma_root, mt_write_locked(mt)); } /* @@ -908,8 +828,6 @@ static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt, * @mt: The maple tree * @mn: The maple node * @type: The maple node type - * @offset: The offset of the highest sub-gap in this node. - * @end: The end of the data in this node. */ static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn, enum maple_type type) @@ -958,10 +876,8 @@ static inline unsigned char ma_meta_end(struct maple_node *mn, /* * ma_meta_gap() - Get the largest gap location of a node from the metadata * @mn: The maple node - * @mt: The maple node type */ -static inline unsigned char ma_meta_gap(struct maple_node *mn, - enum maple_type mt) +static inline unsigned char ma_meta_gap(struct maple_node *mn) { return mn->ma64.meta.gap; } @@ -969,7 +885,7 @@ static inline unsigned char ma_meta_gap(struct maple_node *mn, /* * ma_set_meta_gap() - Set the largest gap location in a nodes metadata * @mn: The maple node - * @mn: The maple node type + * @mt: The maple node type * @offset: The location of the largest gap. */ static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt, @@ -983,8 +899,8 @@ static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt, /* * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes. - * @mat - the ma_topiary, a linked list of dead nodes. 
- * @dead_enode - the node to be marked as dead and added to the tail of the list + * @mat: the ma_topiary, a linked list of dead nodes. + * @dead_enode: the node to be marked as dead and added to the tail of the list * * Add the @dead_enode to the linked list in @mat. */ @@ -1002,47 +918,34 @@ static inline void mat_add(struct ma_topiary *mat, mat->tail = dead_enode; } -static void mte_destroy_walk(struct maple_enode *, struct maple_tree *); -static inline void mas_free(struct ma_state *mas, struct maple_enode *used); - -/* - * mas_mat_free() - Free all nodes in a dead list. - * @mas - the maple state - * @mat - the ma_topiary linked list of dead nodes to free. - * - * Free walk a dead list. - */ -static void mas_mat_free(struct ma_state *mas, struct ma_topiary *mat) -{ - struct maple_enode *next; - - while (mat->head) { - next = mte_to_mat(mat->head)->next; - mas_free(mas, mat->head); - mat->head = next; - } -} - +static void mt_free_walk(struct rcu_head *head); +static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt, + bool free); /* * mas_mat_destroy() - Free all nodes and subtrees in a dead list. - * @mas - the maple state - * @mat - the ma_topiary linked list of dead nodes to free. + * @mas: the maple state + * @mat: the ma_topiary linked list of dead nodes to free. * * Destroy walk a dead list. */ static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat) { struct maple_enode *next; + struct maple_node *node; + bool in_rcu = mt_in_rcu(mas->tree); while (mat->head) { next = mte_to_mat(mat->head)->next; - mte_destroy_walk(mat->head, mat->mtree); + node = mte_to_node(mat->head); + mt_destroy_walk(mat->head, mas->tree, !in_rcu); + if (in_rcu) + call_rcu(&node->rcu, mt_free_walk); mat->head = next; } } /* * mas_descend() - Descend into the slot stored in the ma_state. - * @mas - the maple state. + * @mas: the maple state. * * Note: Not RCU safe, only use in write side or debug code. */ @@ -1065,28 +968,10 @@ static inline void mas_descend(struct ma_state *mas) } /* - * mte_set_gap() - Set a maple node gap. - * @mn: The encoded maple node - * @gap: The offset of the gap to set - * @val: The gap value - */ -static inline void mte_set_gap(const struct maple_enode *mn, - unsigned char gap, unsigned long val) -{ - switch (mte_node_type(mn)) { - default: - break; - case maple_arange_64: - mte_to_node(mn)->ma64.gap[gap] = val; - break; - } -} - -/* * mas_ascend() - Walk up a level of the tree. * @mas: The maple state * - * Sets the @mas->max and @mas->min to the correct values when walking up. This + * Sets the @mas->max and @mas->min for the parent node of mas->node. This * may cause several levels of walking up to find the correct min and max. * May find a dead node which will cause a premature return. * Return: 1 on dead node, 0 otherwise @@ -1129,14 +1014,22 @@ static int mas_ascend(struct ma_state *mas) return 0; } - if (!mas->min) + min = 0; + max = ULONG_MAX; + + /* + * !mas->offset implies that parent node min == mas->min. + * mas->offset > 0 implies that we need to walk up to find the + * implied pivot min. + */ + if (!mas->offset) { + min = mas->min; set_min = true; + } if (mas->max == ULONG_MAX) set_max = true; - min = 0; - max = ULONG_MAX; do { p_enode = a_enode; a_type = mas_parent_type(mas, p_enode); @@ -1177,81 +1070,24 @@ static int mas_ascend(struct ma_state *mas) * * Return: A pointer to a maple node. 
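The hunk that follows rewrites mas_pop_node() to draw from exactly two sources: a single node cached in mas->alloc for cheap writes, otherwise the prefilled sheaf. A compressed, self-contained sketch of that control flow (hypothetical toy_* types; the kernel version also zeroes the node before returning it):

```c
/* Sketch of the pop path in the rewrite below: use the single cached
 * node first, else draw from the prefilled sheaf. Illustrative only. */
#include <stddef.h>

struct toy_sheaf {
	size_t count;
	void *objs[16];
};

struct toy_state {
	void *alloc;			/* one cached node, used first */
	struct toy_sheaf *sheaf;	/* prefilled batch for larger writes */
};

static void *toy_pop_node(struct toy_state *st)
{
	void *ret;

	if (st->alloc) {		/* cheap single-allocation case */
		ret = st->alloc;
		st->alloc = NULL;
		return ret;
	}
	if (!st->sheaf || !st->sheaf->count)
		return NULL;		/* caller failed to preallocate */
	return st->sheaf->objs[--st->sheaf->count];
}
```

Compared with the removed maple_alloc list walking, the pop is now branch-light: no per-node bookkeeping of totals or request counts survives the conversion.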
*/ -static inline struct maple_node *mas_pop_node(struct ma_state *mas) +static __always_inline struct maple_node *mas_pop_node(struct ma_state *mas) { - struct maple_alloc *ret, *node = mas->alloc; - unsigned long total = mas_allocated(mas); - unsigned int req = mas_alloc_req(mas); + struct maple_node *ret; - /* nothing or a request pending. */ - if (WARN_ON(!total)) - return NULL; - - if (total == 1) { - /* single allocation in this ma_state */ + if (mas->alloc) { + ret = mas->alloc; mas->alloc = NULL; - ret = node; - goto single_node; + goto out; } - if (node->node_count == 1) { - /* Single allocation in this node. */ - mas->alloc = node->slot[0]; - mas->alloc->total = node->total - 1; - ret = node; - goto new_head; - } - node->total--; - ret = node->slot[--node->node_count]; - node->slot[node->node_count] = NULL; + if (WARN_ON_ONCE(!mas->sheaf)) + return NULL; -single_node: -new_head: - if (req) { - req++; - mas_set_alloc_req(mas, req); - } + ret = kmem_cache_alloc_from_sheaf(maple_node_cache, GFP_NOWAIT, mas->sheaf); +out: memset(ret, 0, sizeof(*ret)); - return (struct maple_node *)ret; -} - -/* - * mas_push_node() - Push a node back on the maple state allocation. - * @mas: The maple state - * @used: The used maple node - * - * Stores the maple node back into @mas->alloc for reuse. Updates allocated and - * requested node count as necessary. - */ -static inline void mas_push_node(struct ma_state *mas, struct maple_node *used) -{ - struct maple_alloc *reuse = (struct maple_alloc *)used; - struct maple_alloc *head = mas->alloc; - unsigned long count; - unsigned int requested = mas_alloc_req(mas); - - count = mas_allocated(mas); - - reuse->request_count = 0; - reuse->node_count = 0; - if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) { - head->slot[head->node_count++] = reuse; - head->total++; - goto done; - } - - reuse->total = 1; - if ((head) && !((unsigned long)head & 0x1)) { - reuse->slot[0] = head; - reuse->node_count = 1; - reuse->total += head->total; - } - - mas->alloc = reuse; -done: - if (requested > 1) - mas_set_alloc_req(mas, requested - 1); + return ret; } /* @@ -1261,133 +1097,95 @@ done: */ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp) { - struct maple_alloc *node; - unsigned long allocated = mas_allocated(mas); - unsigned int requested = mas_alloc_req(mas); - unsigned int count; - void **slots = NULL; - unsigned int max_req = 0; - - if (!requested) + if (!mas->node_request) return; - mas_set_alloc_req(mas, 0); - if (mas->mas_flags & MA_STATE_PREALLOC) { - if (allocated) + if (mas->node_request == 1) { + if (mas->sheaf) + goto use_sheaf; + + if (mas->alloc) return; - WARN_ON(!allocated); - } - if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) { - node = (struct maple_alloc *)mt_alloc_one(gfp); - if (!node) - goto nomem_one; + mas->alloc = mt_alloc_one(gfp); + if (!mas->alloc) + goto error; - if (allocated) { - node->slot[0] = mas->alloc; - node->node_count = 1; - } else { - node->node_count = 0; - } + mas->node_request = 0; + return; + } - mas->alloc = node; - node->total = ++allocated; - requested--; +use_sheaf: + if (unlikely(mas->alloc)) { + kfree(mas->alloc); + mas->alloc = NULL; } - node = mas->alloc; - node->request_count = 0; - while (requested) { - max_req = MAPLE_ALLOC_SLOTS - node->node_count; - slots = (void **)&node->slot[node->node_count]; - max_req = min(requested, max_req); - count = mt_alloc_bulk(gfp, max_req, slots); - if (!count) - goto nomem_bulk; + if (mas->sheaf) { + unsigned long refill; - if (node->node_count == 
0) { - node->slot[0]->node_count = 0; - node->slot[0]->request_count = 0; + refill = mas->node_request; + if (kmem_cache_sheaf_size(mas->sheaf) >= refill) { + mas->node_request = 0; + return; } - node->node_count += count; - allocated += count; - node = node->slot[0]; - requested -= count; - } - mas->alloc->total = allocated; - return; + if (mt_refill_sheaf(gfp, &mas->sheaf, refill)) + goto error; -nomem_bulk: - /* Clean up potential freed allocations on bulk failure */ - memset(slots, 0, max_req * sizeof(unsigned long)); -nomem_one: - mas_set_alloc_req(mas, requested); - if (mas->alloc && !(((unsigned long)mas->alloc & 0x1))) - mas->alloc->total = allocated; - mas_set_err(mas, -ENOMEM); -} + mas->node_request = 0; + return; + } -/* - * mas_free() - Free an encoded maple node - * @mas: The maple state - * @used: The encoded maple node to free. - * - * Uses rcu free if necessary, pushes @used back on the maple state allocations - * otherwise. - */ -static inline void mas_free(struct ma_state *mas, struct maple_enode *used) -{ - struct maple_node *tmp = mte_to_node(used); + mas->sheaf = mt_get_sheaf(gfp, mas->node_request); + if (likely(mas->sheaf)) { + mas->node_request = 0; + return; + } - if (mt_in_rcu(mas->tree)) - ma_free_rcu(tmp); - else - mas_push_node(mas, tmp); +error: + mas_set_err(mas, -ENOMEM); } -/* - * mas_node_count() - Check if enough nodes are allocated and request more if - * there is not enough nodes. - * @mas: The maple state - * @count: The number of nodes needed - * @gfp: the gfp flags - */ -static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp) +static inline void mas_empty_nodes(struct ma_state *mas) { - unsigned long allocated = mas_allocated(mas); + mas->node_request = 0; + if (mas->sheaf) { + mt_return_sheaf(mas->sheaf); + mas->sheaf = NULL; + } - if (allocated < count) { - mas_set_alloc_req(mas, count - allocated); - mas_alloc_nodes(mas, gfp); + if (mas->alloc) { + kfree(mas->alloc); + mas->alloc = NULL; } } /* - * mas_node_count() - Check if enough nodes are allocated and request more if - * there is not enough nodes. + * mas_free() - Free an encoded maple node * @mas: The maple state - * @count: The number of nodes needed + * @used: The encoded maple node to free. * - * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags. + * Uses rcu free if necessary, pushes @used back on the maple state allocations + * otherwise. */ -static void mas_node_count(struct ma_state *mas, int count) +static inline void mas_free(struct ma_state *mas, struct maple_enode *used) { - return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN); + ma_free_rcu(mte_to_node(used)); } /* * mas_start() - Sets up maple state for operations. * @mas: The maple state. * - * If mas->node == MAS_START, then set the min, max and depth to + * If mas->status == ma_start, then set the min, max and depth to * defaults. * * Return: - * - If mas->node is an error or not MAS_START, return NULL. - * - If it's an empty tree: NULL & mas->node == MAS_NONE - * - If it's a single entry: The entry & mas->node == MAS_ROOT - * - If it's a tree: NULL & mas->node == safe root node. + * - If mas->node is an error or not mas_start, return NULL. 
+ * - If it's an empty tree: NULL & mas->status == ma_none + * - If it's a single entry: The entry & mas->status == ma_root + * - If it's a tree: NULL & mas->status == ma_active */ static inline struct maple_enode *mas_start(struct ma_state *mas) { @@ -1402,7 +1200,8 @@ retry: root = mas_root(mas); /* Tree with nodes */ if (likely(xa_is_node(root))) { - mas->depth = 1; + mas->depth = 0; + mas->status = ma_active; mas->node = mte_safe_root(root); mas->offset = 0; if (mte_dead_node(mas->node)) @@ -1411,15 +1210,16 @@ retry: return NULL; } + mas->node = NULL; /* empty tree */ if (unlikely(!root)) { - mas->node = MAS_NONE; + mas->status = ma_none; mas->offset = MAPLE_NODE_SLOTS; return NULL; } /* Single entry tree */ - mas->node = MAS_ROOT; + mas->status = ma_root; mas->offset = MAPLE_NODE_SLOTS; /* Single entry tree. */ @@ -1442,10 +1242,8 @@ retry: * Uses metadata to find the end of the data when possible. * Return: The zero indexed last slot with data (may be null). */ -static inline unsigned char ma_data_end(struct maple_node *node, - enum maple_type type, - unsigned long *pivots, - unsigned long max) +static __always_inline unsigned char ma_data_end(struct maple_node *node, + enum maple_type type, unsigned long *pivots, unsigned long max) { unsigned char offset; @@ -1502,7 +1300,7 @@ static inline unsigned char mas_data_end(struct ma_state *mas) /* * mas_leaf_max_gap() - Returns the largest gap in a leaf node - * @mas - the maple state + * @mas: the maple state * * Return: The maximum gap in the leaf. */ @@ -1558,6 +1356,9 @@ static unsigned long mas_leaf_max_gap(struct ma_state *mas) gap = ULONG_MAX - pivots[max_piv]; if (gap > max_gap) max_gap = gap; + + if (max_gap > pivots[max_piv] - mas->min) + return max_gap; } for (; i <= max_piv; i++) { @@ -1581,7 +1382,7 @@ static unsigned long mas_leaf_max_gap(struct ma_state *mas) * @node: The maple node * @gaps: The pointer to the gaps * @mt: The maple node type - * @*off: Pointer to store the offset location of the gap. + * @off: Pointer to store the offset location of the gap. * * Uses the metadata data end to scan backwards across set gaps. * @@ -1610,8 +1411,6 @@ ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt, * mas_max_gap() - find the largest gap in a non-leaf node and set the slot. * @mas: The maple state. * - * If the metadata gap is set to MAPLE_ARANGE64_META_MAX, there is no gap. - * * Return: The gap value. */ static inline unsigned long mas_max_gap(struct ma_state *mas) @@ -1627,10 +1426,7 @@ static inline unsigned long mas_max_gap(struct ma_state *mas) node = mas_mn(mas); MAS_BUG_ON(mas, mt != maple_arange_64); - offset = ma_meta_gap(node, mt); - if (offset == MAPLE_ARANGE64_META_MAX) - return 0; - + offset = ma_meta_gap(node); gaps = ma_gaps(node, mt); return gaps[offset]; } @@ -1661,11 +1457,8 @@ static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset, ascend: MAS_BUG_ON(mas, pmt != maple_arange_64); - meta_offset = ma_meta_gap(pnode, pmt); - if (meta_offset == MAPLE_ARANGE64_META_MAX) - meta_gap = 0; - else - meta_gap = pgaps[meta_offset]; + meta_offset = ma_meta_gap(pnode); + meta_gap = pgaps[meta_offset]; pgaps[offset] = new; @@ -1678,7 +1471,6 @@ ascend: ma_set_meta_gap(pnode, pmt, offset); } else if (new < meta_gap) { - meta_offset = 15; new = ma_max_gap(pnode, pgaps, pmt, &meta_offset); ma_set_meta_gap(pnode, pmt, meta_offset); } @@ -1697,7 +1489,7 @@ ascend: /* * mas_update_gap() - Update a nodes gaps and propagate up if necessary. - * @mas - the maple state. 
+ * @mas: the maple state. */ static inline void mas_update_gap(struct ma_state *mas) { @@ -1724,14 +1516,14 @@ static inline void mas_update_gap(struct ma_state *mas) /* * mas_adopt_children() - Set the parent pointer of all nodes in @parent to * @parent with the slot encoded. - * @mas - the maple state (for the tree) - * @parent - the maple encoded node containing the children. + * @mas: the maple state (for the tree) + * @parent: the maple encoded node containing the children. */ static inline void mas_adopt_children(struct ma_state *mas, struct maple_enode *parent) { enum maple_type type = mte_node_type(parent); - struct maple_node *node = mas_mn(mas); + struct maple_node *node = mte_to_node(parent); void __rcu **slots = ma_slots(node, type); unsigned long *pivots = ma_pivots(node, type); struct maple_enode *child; @@ -1745,53 +1537,56 @@ static inline void mas_adopt_children(struct ma_state *mas, } /* - * mas_replace() - Replace a maple node in the tree with mas->node. Uses the - * parent encoding to locate the maple node in the tree. - * @mas - the ma_state to use for operations. - * @advanced - boolean to adopt the child nodes and free the old node (false) or - * leave the node (true) and handle the adoption and free elsewhere. + * mas_put_in_tree() - Put a new node in the tree, smp_wmb(), and mark the old + * node as dead. + * @mas: the maple state with the new node + * @old_enode: The old maple encoded node to replace. + * @new_height: if we are inserting a root node, update the height of the tree */ -static inline void mas_replace(struct ma_state *mas, bool advanced) +static inline void mas_put_in_tree(struct ma_state *mas, + struct maple_enode *old_enode, char new_height) __must_hold(mas->tree->ma_lock) { - struct maple_node *mn = mas_mn(mas); - struct maple_enode *old_enode; - unsigned char offset = 0; - void __rcu **slots = NULL; + unsigned char offset; + void __rcu **slots; - if (ma_is_root(mn)) { - old_enode = mas_root_locked(mas); + if (mte_is_root(mas->node)) { + mas_mn(mas)->parent = ma_parent_ptr(mas_tree_parent(mas)); + rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); + mt_set_height(mas->tree, new_height); } else { + offset = mte_parent_slot(mas->node); slots = ma_slots(mte_parent(mas->node), mas_parent_type(mas, mas->node)); - old_enode = mas_slot_locked(mas, slots, offset); - } - - if (!advanced && !mte_is_leaf(mas->node)) - mas_adopt_children(mas, mas->node); - - if (mte_is_root(mas->node)) { - mn->parent = ma_parent_ptr( - ((unsigned long)mas->tree | MA_ROOT_PARENT)); - rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); - mas_set_height(mas); - } else { rcu_assign_pointer(slots[offset], mas->node); } - if (!advanced) { - mte_set_node_dead(old_enode); - mas_free(mas, old_enode); - } + mte_set_node_dead(old_enode); } /* - * mas_new_child() - Find the new child of a node. - * @mas: the maple state + * mas_replace_node() - Replace a node by putting it in the tree, marking it + * dead, and freeing it. + * the parent encoding to locate the maple node in the tree. + * @mas: the ma_state with @mas->node pointing to the new node. + * @old_enode: The old maple encoded node. 
+ * @new_height: The new height of the tree as a result of the operation + */ +static inline void mas_replace_node(struct ma_state *mas, + struct maple_enode *old_enode, unsigned char new_height) + __must_hold(mas->tree->ma_lock) +{ + mas_put_in_tree(mas, old_enode, new_height); + mas_free(mas, old_enode); +} + +/* + * mas_find_child() - Find a child who has the parent @mas->node. + * @mas: the maple state with the parent. * @child: the maple state to store the child. */ -static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child) +static inline bool mas_find_child(struct ma_state *mas, struct ma_state *child) __must_hold(mas->tree->ma_lock) { enum maple_type mt; @@ -1841,7 +1636,6 @@ static inline void mab_shift_right(struct maple_big_node *b_node, /* * mab_middle_node() - Check if a middle node is needed (unlikely) * @b_node: the maple_big_node that contains the data. - * @size: the amount of data in the b_node * @split: the potential split location * @slot_count: the size that can be stored in a single node being considered. * @@ -1889,38 +1683,25 @@ static inline int mab_no_null_split(struct maple_big_node *b_node, /* * mab_calc_split() - Calculate the split location and if there needs to be two * splits. + * @mas: The maple state * @bn: The maple_big_node with the data * @mid_split: The second split, if required. 0 otherwise. * * Return: The first split location. The middle split is set in @mid_split. */ static inline int mab_calc_split(struct ma_state *mas, - struct maple_big_node *bn, unsigned char *mid_split, unsigned long min) + struct maple_big_node *bn, unsigned char *mid_split) { unsigned char b_end = bn->b_end; int split = b_end / 2; /* Assume equal split. */ - unsigned char slot_min, slot_count = mt_slots[bn->type]; + unsigned char slot_count = mt_slots[bn->type]; /* * To support gap tracking, all NULL entries are kept together and a node cannot * end on a NULL entry, with the exception of the left-most leaf. The * limitation means that the split of a node must be checked for this condition * and be able to put more data in one direction or the other. - */ - if (unlikely((mas->mas_flags & MA_STATE_BULK))) { - *mid_split = 0; - split = b_end - mt_min_slots[bn->type]; - - if (!ma_is_leaf(bn->type)) - return split; - - mas->mas_flags |= MA_STATE_REBALANCE; - if (!bn->slot[split]) - split--; - return split; - } - - /* + * * Although extremely rare, it is possible to enter what is known as the 3-way * split scenario. The 3-way split comes about by means of a store of a range * that overwrites the end and beginning of two full nodes. The result is a set @@ -1932,18 +1713,7 @@ static inline int mab_calc_split(struct ma_state *mas, split = b_end / 3; *mid_split = split * 2; } else { - slot_min = mt_min_slots[bn->type]; - *mid_split = 0; - /* - * Avoid having a range less than the slot count unless it - * causes one node to be deficient. - * NOTE: mt_min_slots is 1 based, b_end and split are zero. 
- */ - while ((split < slot_count - 1) && - ((bn->pivot[split] - min) < slot_count - 1) && - (b_end - split > slot_min)) - split++; } /* Avoid ending a node on a NULL entry */ @@ -1989,14 +1759,13 @@ static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start, for (; i < piv_end; i++, j++) { b_node->pivot[j] = pivots[i]; if (unlikely(!b_node->pivot[j])) - break; + goto complete; if (unlikely(mas->max == b_node->pivot[j])) goto complete; } - if (likely(i <= mas_end)) - b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt); + b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt); complete: b_node->b_end = ++j; @@ -2012,27 +1781,13 @@ complete: /* * mas_leaf_set_meta() - Set the metadata of a leaf if possible. - * @mas: The maple state * @node: The maple node - * @pivots: pointer to the maple node pivots * @mt: The maple type - * @end: The assumed end - * - * Note, end may be incremented within this function but not modified at the - * source. This is fine since the metadata is the last thing to be stored in a - * node during a write. + * @end: The node end */ -static inline void mas_leaf_set_meta(struct ma_state *mas, - struct maple_node *node, unsigned long *pivots, +static inline void mas_leaf_set_meta(struct maple_node *node, enum maple_type mt, unsigned char end) { - /* There is no room for metadata already */ - if (mt_pivots[mt] <= end) - return; - - if (pivots[end] && pivots[end] < mas->max) - end++; - if (end < mt_slots[mt] - 1) ma_set_meta(node, mt, 0, end); } @@ -2076,7 +1831,7 @@ static inline void mab_mas_cp(struct maple_big_node *b_node, end = j - 1; if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) { unsigned long max_gap = 0; - unsigned char offset = 15; + unsigned char offset = 0; gaps = ma_gaps(node, mt); do { @@ -2089,78 +1844,7 @@ static inline void mab_mas_cp(struct maple_big_node *b_node, ma_set_meta(node, mt, offset, end); } else { - mas_leaf_set_meta(mas, node, pivots, mt, end); - } -} - -/* - * mas_descend_adopt() - Descend through a sub-tree and adopt children. - * @mas: the maple state with the maple encoded node of the sub-tree. - * - * Descend through a sub-tree and adopt children who do not have the correct - * parents set. Follow the parents which have the correct parents as they are - * the new entries which need to be followed to find other incorrectly set - * parents. - */ -static inline void mas_descend_adopt(struct ma_state *mas) -{ - struct ma_state list[3], next[3]; - int i, n; - - /* - * At each level there may be up to 3 correct parent pointers which indicates - * the new nodes which need to be walked to find any new nodes at a lower level. - */ - - for (i = 0; i < 3; i++) { - list[i] = *mas; - list[i].offset = 0; - next[i].offset = 0; - } - next[0] = *mas; - - while (!mte_is_leaf(list[0].node)) { - n = 0; - for (i = 0; i < 3; i++) { - if (mas_is_none(&list[i])) - continue; - - if (i && list[i-1].node == list[i].node) - continue; - - while ((n < 3) && (mas_new_child(&list[i], &next[n]))) - n++; - - mas_adopt_children(&list[i], list[i].node); - } - - while (n < 3) - next[n++].node = MAS_NONE; - - /* descend by setting the list to the children */ - for (i = 0; i < 3; i++) - list[i] = next[i]; - } -} - -/* - * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert. 
- * @mas: The maple state - * @end: The maple node end - * @mt: The maple node type - */ -static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end, - enum maple_type mt) -{ - if (!(mas->mas_flags & MA_STATE_BULK)) - return; - - if (mte_is_root(mas->node)) - return; - - if (end > mt_min_slots[mt]) { - mas->mas_flags &= ~MA_STATE_REBALANCE; - return; + mas_leaf_set_meta(node, mt, end); } } @@ -2211,11 +1895,8 @@ static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas, goto b_end; /* Handle new range ending before old range ends */ - piv = mas_logical_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type); + piv = mas_safe_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type); if (piv > mas->last) { - if (piv == ULONG_MAX) - mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type); - if (offset_end != slot) wr_mas->content = mas_slot_locked(mas, wr_mas->slots, offset_end); @@ -2227,11 +1908,11 @@ static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas, } slot = offset_end + 1; - if (slot > wr_mas->node_end) + if (slot > mas->end) goto b_end; /* Copy end data to the end of the node. */ - mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end); + mas_mab_cp(mas, slot, mas->end + 1, b_node, ++b_end); b_node->b_end--; return; @@ -2249,9 +1930,7 @@ static inline bool mas_prev_sibling(struct ma_state *mas) { unsigned int p_slot = mte_parent_slot(mas->node); - if (mte_is_root(mas->node)) - return false; - + /* For root node, p_slot is set to 0 by mte_parent_slot(). */ if (!p_slot) return false; @@ -2286,23 +1965,28 @@ static inline bool mas_next_sibling(struct ma_state *mas) } /* - * mte_node_or_node() - Return the encoded node or MAS_NONE. + * mas_node_or_none() - Set the enode and state. + * @mas: the maple state * @enode: The encoded maple node. * - * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state. - * - * Return: @enode or MAS_NONE + * Set the node to the enode and the status. */ -static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode) +static inline void mas_node_or_none(struct ma_state *mas, + struct maple_enode *enode) { - if (enode) - return enode; - - return ma_enode_ptr(MAS_NONE); + if (enode) { + mas->node = enode; + mas->status = ma_active; + } else { + mas->node = NULL; + mas->status = ma_none; + } } /* * mas_wr_node_walk() - Find the correct offset for the index in the @mas. + * If @mas->index cannot be found within the containing + * node, we traverse to the last entry in the node. * @wr_mas: The maple write state * * Uses mas_slot_locked() and does not need to worry about dead nodes. @@ -2320,8 +2004,8 @@ static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas) wr_mas->node = mas_mn(wr_mas->mas); wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type); - count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type, - wr_mas->pivots, mas->max); + count = mas->end = ma_data_end(wr_mas->node, wr_mas->type, + wr_mas->pivots, mas->max); offset = mas->offset; while (offset < count && mas->index > wr_mas->pivots[offset]) @@ -2333,101 +2017,8 @@ static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas) } /* - * mas_topiary_range() - Add a range of slots to the topiary. 
- * @mas: The maple state - * @destroy: The topiary to add the slots (usually destroy) - * @start: The starting slot inclusively - * @end: The end slot inclusively - */ -static inline void mas_topiary_range(struct ma_state *mas, - struct ma_topiary *destroy, unsigned char start, unsigned char end) -{ - void __rcu **slots; - unsigned char offset; - - MAS_BUG_ON(mas, mte_is_leaf(mas->node)); - - slots = ma_slots(mas_mn(mas), mte_node_type(mas->node)); - for (offset = start; offset <= end; offset++) { - struct maple_enode *enode = mas_slot_locked(mas, slots, offset); - - if (mte_dead_node(enode)) - continue; - - mat_add(destroy, enode); - } -} - -/* - * mast_topiary() - Add the portions of the tree to the removal list; either to - * be freed or discarded (destroy walk). - * @mast: The maple_subtree_state. - */ -static inline void mast_topiary(struct maple_subtree_state *mast) -{ - MA_WR_STATE(wr_mas, mast->orig_l, NULL); - unsigned char r_start, r_end; - unsigned char l_start, l_end; - void __rcu **l_slots, **r_slots; - - wr_mas.type = mte_node_type(mast->orig_l->node); - mast->orig_l->index = mast->orig_l->last; - mas_wr_node_walk(&wr_mas); - l_start = mast->orig_l->offset + 1; - l_end = mas_data_end(mast->orig_l); - r_start = 0; - r_end = mast->orig_r->offset; - - if (r_end) - r_end--; - - l_slots = ma_slots(mas_mn(mast->orig_l), - mte_node_type(mast->orig_l->node)); - - r_slots = ma_slots(mas_mn(mast->orig_r), - mte_node_type(mast->orig_r->node)); - - if ((l_start < l_end) && - mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_start))) { - l_start++; - } - - if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_end))) { - if (r_end) - r_end--; - } - - if ((l_start > r_end) && (mast->orig_l->node == mast->orig_r->node)) - return; - - /* At the node where left and right sides meet, add the parts between */ - if (mast->orig_l->node == mast->orig_r->node) { - return mas_topiary_range(mast->orig_l, mast->destroy, - l_start, r_end); - } - - /* mast->orig_r is different and consumed. */ - if (mte_is_leaf(mast->orig_r->node)) - return; - - if (mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_end))) - l_end--; - - - if (l_start <= l_end) - mas_topiary_range(mast->orig_l, mast->destroy, l_start, l_end); - - if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_start))) - r_start++; - - if (r_start <= r_end) - mas_topiary_range(mast->orig_r, mast->destroy, 0, r_end); -} - -/* * mast_rebalance_next() - Rebalance against the next node * @mast: The maple subtree state - * @old_r: The encoded maple node to the right (next node). */ static inline void mast_rebalance_next(struct maple_subtree_state *mast) { @@ -2441,7 +2032,6 @@ static inline void mast_rebalance_next(struct maple_subtree_state *mast) /* * mast_rebalance_prev() - Rebalance against the previous node * @mast: The maple subtree state - * @old_l: The encoded maple node to the left (previous node) */ static inline void mast_rebalance_prev(struct maple_subtree_state *mast) { @@ -2459,7 +2049,7 @@ static inline void mast_rebalance_prev(struct maple_subtree_state *mast) /* * mast_spanning_rebalance() - Rebalance nodes with nearest neighbour favouring * the node to the right. Checking the nodes to the right then the left at each - * level upwards until root is reached. Free and destroy as needed. + * level upwards until root is reached. * Data is copied into the @mast->bn. * @mast: The maple_subtree_state. 
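mast_spanning_rebalance(), whose body is diffed next, encodes the neighbour preference described here: ascend until a sibling with spare data exists, trying the right side before the left. A toy restatement of just that decision, with hypothetical types:

```c
/* Toy restatement of the neighbour preference described above: from a
 * deficient node, try the right sibling first, then the left, else the
 * caller must ascend another level. Hypothetical types, control flow only. */
struct toy_cursor {
	unsigned char offset;	/* slot occupied in the parent */
	unsigned char end;	/* last occupied slot in the parent */
};

/* Returns +1 to borrow from the right sibling, -1 from the left,
 * 0 if neither exists at this level. */
static int toy_pick_neighbour(const struct toy_cursor *c)
{
	if (c->offset < c->end)		/* right sibling exists: favour it */
		return 1;
	if (c->offset > 0)		/* otherwise fall back to the left */
		return -1;
	return 0;			/* lone child: ascend and retry */
}
```

This diff keeps that walk but drops the on-the-fly topiary bookkeeping, which now happens centrally in mas_topiary_replace() further down.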
*/ @@ -2468,97 +2058,31 @@ bool mast_spanning_rebalance(struct maple_subtree_state *mast) { struct ma_state r_tmp = *mast->orig_r; struct ma_state l_tmp = *mast->orig_l; - struct maple_enode *ancestor = NULL; - unsigned char start, end; unsigned char depth = 0; - r_tmp = *mast->orig_r; - l_tmp = *mast->orig_l; do { mas_ascend(mast->orig_r); mas_ascend(mast->orig_l); depth++; - if (!ancestor && - (mast->orig_r->node == mast->orig_l->node)) { - ancestor = mast->orig_r->node; - end = mast->orig_r->offset - 1; - start = mast->orig_l->offset + 1; - } - if (mast->orig_r->offset < mas_data_end(mast->orig_r)) { - if (!ancestor) { - ancestor = mast->orig_r->node; - start = 0; - } - mast->orig_r->offset++; do { mas_descend(mast->orig_r); mast->orig_r->offset = 0; - depth--; - } while (depth); + } while (--depth); mast_rebalance_next(mast); - do { - unsigned char l_off = 0; - struct maple_enode *child = r_tmp.node; - - mas_ascend(&r_tmp); - if (ancestor == r_tmp.node) - l_off = start; - - if (r_tmp.offset) - r_tmp.offset--; - - if (l_off < r_tmp.offset) - mas_topiary_range(&r_tmp, mast->destroy, - l_off, r_tmp.offset); - - if (l_tmp.node != child) - mat_add(mast->free, child); - - } while (r_tmp.node != ancestor); - *mast->orig_l = l_tmp; return true; - } else if (mast->orig_l->offset != 0) { - if (!ancestor) { - ancestor = mast->orig_l->node; - end = mas_data_end(mast->orig_l); - } - mast->orig_l->offset--; do { mas_descend(mast->orig_l); mast->orig_l->offset = mas_data_end(mast->orig_l); - depth--; - } while (depth); + } while (--depth); mast_rebalance_prev(mast); - do { - unsigned char r_off; - struct maple_enode *child = l_tmp.node; - - mas_ascend(&l_tmp); - if (ancestor == l_tmp.node) - r_off = end; - else - r_off = mas_data_end(&l_tmp); - - if (l_tmp.offset < r_off) - l_tmp.offset++; - - if (l_tmp.offset < r_off) - mas_topiary_range(&l_tmp, mast->destroy, - l_tmp.offset, r_off); - - if (r_tmp.node != child) - mat_add(mast->free, child); - - } while (l_tmp.node != ancestor); - *mast->orig_r = r_tmp; return true; } @@ -2570,36 +2094,24 @@ bool mast_spanning_rebalance(struct maple_subtree_state *mast) } /* - * mast_ascend_free() - Add current original maple state nodes to the free list - * and ascend. + * mast_ascend() - Ascend the original left and right maple states. * @mast: the maple subtree state. * - * Ascend the original left and right sides and add the previous nodes to the - * free list. Set the slots to point to the correct location in the new nodes. + * Ascend the original left and right sides. Set the offsets to point to the + * data already in the new tree (@mast->l and @mast->r). */ -static inline void -mast_ascend_free(struct maple_subtree_state *mast) +static inline void mast_ascend(struct maple_subtree_state *mast) { MA_WR_STATE(wr_mas, mast->orig_r, NULL); - struct maple_enode *left = mast->orig_l->node; - struct maple_enode *right = mast->orig_r->node; - mas_ascend(mast->orig_l); mas_ascend(mast->orig_r); - mat_add(mast->free, left); - - if (left != right) - mat_add(mast->free, right); mast->orig_r->offset = 0; mast->orig_r->index = mast->r->max; /* last should be larger than or equal to index */ if (mast->orig_r->last < mast->orig_r->index) mast->orig_r->last = mast->orig_r->index; - /* - * The node may not contain the value so set slot to ensure all - * of the nodes contents are freed or destroyed. 
- */ + wr_mas.type = mte_node_type(mast->orig_r->node); mas_wr_node_walk(&wr_mas); /* Set up the left side of things */ @@ -2643,7 +2155,7 @@ static inline struct maple_enode static inline unsigned char mas_mab_to_node(struct ma_state *mas, struct maple_big_node *b_node, struct maple_enode **left, struct maple_enode **right, struct maple_enode **middle, - unsigned char *mid_split, unsigned long min) + unsigned char *mid_split) { unsigned char split = 0; unsigned char slot_count = mt_slots[b_node->type]; @@ -2656,7 +2168,7 @@ static inline unsigned char mas_mab_to_node(struct ma_state *mas, if (b_node->b_end < slot_count) { split = b_node->b_end; } else { - split = mab_calc_split(mas, b_node, mid_split, min); + split = mab_calc_split(mas, b_node, mid_split); *right = mas_new_ma_node(mas, b_node); } @@ -2670,9 +2182,9 @@ static inline unsigned char mas_mab_to_node(struct ma_state *mas, /* * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end * pointer. - * @b_node - the big node to add the entry - * @mas - the maple state to get the pivot (mas->max) - * @entry - the entry to add, if NULL nothing happens. + * @b_node: the big node to add the entry + * @mas: the maple state to get the pivot (mas->max) + * @entry: the entry to add, if NULL nothing happens. */ static inline void mab_set_b_end(struct maple_big_node *b_node, struct ma_state *mas, @@ -2691,11 +2203,11 @@ static inline void mab_set_b_end(struct maple_big_node *b_node, * mas_set_split_parent() - combine_then_separate helper function. Sets the parent * of @mas->node to either @left or @right, depending on @slot and @split * - * @mas - the maple state with the node that needs a parent - * @left - possible parent 1 - * @right - possible parent 2 - * @slot - the slot the mas->node was placed - * @split - the split location between @left and @right + * @mas: the maple state with the node that needs a parent + * @left: possible parent 1 + * @right: possible parent 2 + * @slot: the slot the mas->node was placed + * @split: the split location between @left and @right */ static inline void mas_set_split_parent(struct ma_state *mas, struct maple_enode *left, @@ -2715,11 +2227,11 @@ static inline void mas_set_split_parent(struct ma_state *mas, /* * mte_mid_split_check() - Check if the next node passes the mid-split - * @**l: Pointer to left encoded maple node. - * @**m: Pointer to middle encoded maple node. - * @**r: Pointer to right encoded maple node. + * @l: Pointer to left encoded maple node. + * @m: Pointer to middle encoded maple node. + * @r: Pointer to right encoded maple node. * @slot: The offset - * @*split: The split location. + * @split: The split location. * @mid_split: The middle split. */ static inline void mte_mid_split_check(struct maple_enode **l, @@ -2743,10 +2255,10 @@ static inline void mte_mid_split_check(struct maple_enode **l, /* * mast_set_split_parents() - Helper function to set three nodes parents. Slot * is taken from @mast->l. - * @mast - the maple subtree state - * @left - the left node - * @right - the right node - * @split - the split location. + * @mast: the maple subtree state + * @left: the left node + * @right: the right node + * @split: the split location. 
*/
 static inline void mast_set_split_parents(struct maple_subtree_state *mast,
 					struct maple_enode *left,
@@ -2778,58 +2290,152 @@ static inline void mast_set_split_parents(struct maple_subtree_state *mast,
 }
 
 /*
- * mas_wmb_replace() - Write memory barrier and replace
- * @mas: The maple state
- * @free: the maple topiary list of nodes to free
- * @destroy: The maple topiary list of nodes to destroy (walk and free)
+ * mas_topiary_node() - Dispose of a single node
+ * @mas: The maple state
+ * @tmp_mas: The maple state containing the node to dispose of
+ * @in_rcu: If the tree is in rcu mode
  *
- * Updates gap as necessary.
+ * The node is marked dead and freed back to the allocator via RCU.
  */
-static inline void mas_wmb_replace(struct ma_state *mas,
-				   struct ma_topiary *free,
-				   struct ma_topiary *destroy)
+static inline void mas_topiary_node(struct ma_state *mas,
+		struct ma_state *tmp_mas, bool in_rcu)
 {
-	/* All nodes must see old data as dead prior to replacing that data */
-	smp_wmb(); /* Needed for RCU */
+	struct maple_node *tmp;
+	struct maple_enode *enode;
 
-	/* Insert the new data in the tree */
-	mas_replace(mas, true);
+	if (mas_is_none(tmp_mas))
+		return;
+
+	enode = tmp_mas->node;
+	tmp = mte_to_node(enode);
+	mte_set_node_dead(enode);
+	ma_free_rcu(tmp);
+}
 
-	if (!mte_is_leaf(mas->node))
-		mas_descend_adopt(mas);
+/*
+ * mas_topiary_replace() - Replace the data with new data, then repair the
+ * parent links within the new tree.  Iterate over the dead sub-trees,
+ * collecting them and pruning the nodes that are no longer of use.
+ *
+ * The new tree will have up to three children with the correct parent.  Keep
+ * track of the new entries as they need to be followed to find the next level
+ * of new entries.
+ *
+ * The old tree will have up to three children with the old parent.  Keep track
+ * of the old entries as they may have more replaced nodes below them.  Nodes
+ * within [index, last] are dead subtrees, others need to be freed and followed.
+ * + * @mas: The maple state pointing at the new data + * @old_enode: The maple encoded node being replaced + * @new_height: The new height of the tree as a result of the operation + * + */ +static inline void mas_topiary_replace(struct ma_state *mas, + struct maple_enode *old_enode, unsigned char new_height) +{ + struct ma_state tmp[3], tmp_next[3]; + MA_TOPIARY(subtrees, mas->tree); + bool in_rcu; + int i, n; - mas_mat_free(mas, free); + /* Place data in tree & then mark node as old */ + mas_put_in_tree(mas, old_enode, new_height); - if (destroy) - mas_mat_destroy(mas, destroy); + /* Update the parent pointers in the tree */ + tmp[0] = *mas; + tmp[0].offset = 0; + tmp[1].status = ma_none; + tmp[2].status = ma_none; + while (!mte_is_leaf(tmp[0].node)) { + n = 0; + for (i = 0; i < 3; i++) { + if (mas_is_none(&tmp[i])) + continue; - if (mte_is_leaf(mas->node)) - return; + while (n < 3) { + if (!mas_find_child(&tmp[i], &tmp_next[n])) + break; + n++; + } - mas_update_gap(mas); + mas_adopt_children(&tmp[i], tmp[i].node); + } + + if (MAS_WARN_ON(mas, n == 0)) + break; + + while (n < 3) + tmp_next[n++].status = ma_none; + + for (i = 0; i < 3; i++) + tmp[i] = tmp_next[i]; + } + + /* Collect the old nodes that need to be discarded */ + if (mte_is_leaf(old_enode)) + return mas_free(mas, old_enode); + + tmp[0] = *mas; + tmp[0].offset = 0; + tmp[0].node = old_enode; + tmp[1].status = ma_none; + tmp[2].status = ma_none; + in_rcu = mt_in_rcu(mas->tree); + do { + n = 0; + for (i = 0; i < 3; i++) { + if (mas_is_none(&tmp[i])) + continue; + + while (n < 3) { + if (!mas_find_child(&tmp[i], &tmp_next[n])) + break; + + if ((tmp_next[n].min >= tmp_next->index) && + (tmp_next[n].max <= tmp_next->last)) { + mat_add(&subtrees, tmp_next[n].node); + tmp_next[n].status = ma_none; + } else { + n++; + } + } + } + + if (MAS_WARN_ON(mas, n == 0)) + break; + + while (n < 3) + tmp_next[n++].status = ma_none; + + for (i = 0; i < 3; i++) { + mas_topiary_node(mas, &tmp[i], in_rcu); + tmp[i] = tmp_next[i]; + } + } while (!mte_is_leaf(tmp[0].node)); + + for (i = 0; i < 3; i++) + mas_topiary_node(mas, &tmp[i], in_rcu); + + mas_mat_destroy(mas, &subtrees); } /* - * mast_new_root() - Set a new tree root during subtree creation - * @mast: The maple subtree state + * mas_wmb_replace() - Write memory barrier and replace * @mas: The maple state + * @old_enode: The old maple encoded node that is being replaced. + * @new_height: The new height of the tree as a result of the operation + * + * Updates gap as necessary. 
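+ *
+ * As a sketch of what this ordering buys (assuming a tree created with
+ * MT_FLAGS_USE_RCU; names illustrative), a lockless reader may walk
+ * concurrently with the replacement:
+ *
+ *	rcu_read_lock();
+ *	entry = mtree_load(&mt, index);
+ *	rcu_read_unlock();
+ *
+ * and observes either the old or the new entry, never a dead or
+ * half-copied node.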
*/ -static inline void mast_new_root(struct maple_subtree_state *mast, - struct ma_state *mas) +static inline void mas_wmb_replace(struct ma_state *mas, + struct maple_enode *old_enode, unsigned char new_height) { - mas_mn(mast->l)->parent = - ma_parent_ptr(((unsigned long)mas->tree | MA_ROOT_PARENT)); - if (!mte_dead_node(mast->orig_l->node) && - !mte_is_root(mast->orig_l->node)) { - do { - mast_ascend_free(mast); - mast_topiary(mast); - } while (!mte_is_root(mast->orig_l->node)); - } - if ((mast->orig_l->node != mas->node) && - (mast->l->depth > mas_mt_height(mas))) { - mat_add(mast->free, mas->node); - } + /* Insert the new data in the tree */ + mas_topiary_replace(mas, old_enode, new_height); + + if (mte_is_leaf(mas->node)) + return; + + mas_update_gap(mas); } /* @@ -2847,9 +2453,9 @@ static inline void mast_cp_to_nodes(struct maple_subtree_state *mast, { bool new_lmax = true; - mast->l->node = mte_node_or_none(left); - mast->m->node = mte_node_or_none(middle); - mast->r->node = mte_node_or_none(right); + mas_node_or_none(mast->l, left); + mas_node_or_none(mast->m, middle); + mas_node_or_none(mast->r, right); mast->l->min = mast->orig_l->min; if (split == mast->bn->b_end) { @@ -2923,7 +2529,7 @@ static inline bool mast_sufficient(struct maple_subtree_state *mast) */ static inline bool mast_overflow(struct maple_subtree_state *mast) { - if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node)) + if (mast->bn->b_end > mt_slot_count(mast->orig_l->node)) return true; return false; @@ -2945,32 +2551,29 @@ static inline void *mtree_range_walk(struct ma_state *mas) min = mas->min; max = mas->max; do { - offset = 0; last = next; node = mte_to_node(next); type = mte_node_type(next); pivots = ma_pivots(node, type); end = ma_data_end(node, type, pivots, max); - if (unlikely(ma_dead_node(node))) - goto dead_node; - - if (pivots[offset] >= mas->index) { - prev_max = max; - prev_min = min; - max = pivots[offset]; + prev_min = min; + prev_max = max; + if (pivots[0] >= mas->index) { + offset = 0; + max = pivots[0]; goto next; } - do { + offset = 1; + while (offset < end) { + if (pivots[offset] >= mas->index) { + max = pivots[offset]; + break; + } offset++; - } while ((offset < end) && (pivots[offset] < mas->index)); + } - prev_min = min; min = pivots[offset - 1] + 1; - prev_max = max; - if (likely(offset < end && pivots[offset])) - max = pivots[offset]; - next: slots = ma_slots(node, type); next = mt_slot(mas->tree, slots, offset); @@ -2978,6 +2581,7 @@ next: goto dead_node; } while (!ma_is_leaf(type)); + mas->end = end; mas->offset = offset; mas->index = min; mas->last = max; @@ -3006,21 +2610,19 @@ dead_node: * orig_l_mas->last is used in mas_consume to find the slots that will need to * be either freed or destroyed. orig_l_mas->depth keeps track of the height of * the new sub-tree in case the sub-tree becomes the full tree. - * - * Return: the number of elements in b_node during the last loop. 
*/ -static int mas_spanning_rebalance(struct ma_state *mas, +static void mas_spanning_rebalance(struct ma_state *mas, struct maple_subtree_state *mast, unsigned char count) { unsigned char split, mid_split; unsigned char slot = 0; + unsigned char new_height = 0; /* used if node is a new root */ struct maple_enode *left = NULL, *middle = NULL, *right = NULL; + struct maple_enode *old_enode; MA_STATE(l_mas, mas->tree, mas->index, mas->index); MA_STATE(r_mas, mas->tree, mas->index, mas->last); MA_STATE(m_mas, mas->tree, mas->index, mas->index); - MA_TOPIARY(free, mas->tree); - MA_TOPIARY(destroy, mas->tree); /* * The tree needs to be rebalanced and leaves need to be kept at the same level. @@ -3029,17 +2631,13 @@ static int mas_spanning_rebalance(struct ma_state *mas, mast->l = &l_mas; mast->m = &m_mas; mast->r = &r_mas; - mast->free = &free; - mast->destroy = &destroy; - l_mas.node = r_mas.node = m_mas.node = MAS_NONE; + l_mas.status = r_mas.status = m_mas.status = ma_none; /* Check if this is not root and has sufficient data. */ if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) && unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type])) mast_spanning_rebalance(mast); - mast->orig_l->depth = 0; - /* * Each level of the tree is examined and balanced, pushing data to the left or * right, or rebalancing against left or right nodes is employed to avoid @@ -3049,16 +2647,17 @@ static int mas_spanning_rebalance(struct ma_state *mas, * original tree and the partially new tree. To remedy the parent pointers in * the old tree, the new data is swapped into the active tree and a walk down * the tree is performed and the parent pointers are updated. - * See mas_descend_adopt() for more information.. + * See mas_topiary_replace() for more information. */ while (count--) { mast->bn->b_end--; mast->bn->type = mte_node_type(mast->orig_l->node); split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle, - &mid_split, mast->orig_l->min); + &mid_split); mast_set_split_parents(mast, left, middle, right, split, mid_split); mast_cp_to_nodes(mast, left, middle, right, split, mid_split); + new_height++; /* * Copy data from next level in the tree to mast->bn from next @@ -3066,13 +2665,12 @@ static int mas_spanning_rebalance(struct ma_state *mas, */ memset(mast->bn, 0, sizeof(struct maple_big_node)); mast->bn->type = mte_node_type(left); - mast->orig_l->depth++; /* Root already stored in l->node. */ if (mas_is_root_limits(mast->l)) goto new_root; - mast_ascend_free(mast); + mast_ascend(mast); mast_combine_cp_left(mast); l_mas.offset = mast->bn->b_end; mab_set_b_end(mast->bn, &l_mas, left); @@ -3081,14 +2679,23 @@ static int mas_spanning_rebalance(struct ma_state *mas, /* Copy anything necessary out of the right node. 
*/ mast_combine_cp_right(mast); - mast_topiary(mast); mast->orig_l->last = mast->orig_l->max; - if (mast_sufficient(mast)) - continue; + if (mast_sufficient(mast)) { + if (mast_overflow(mast)) + continue; + + if (mast->orig_l->node == mast->orig_r->node) { + /* + * The data in b_node should be stored in one + * node and in the tree + */ + slot = mast->l->offset; + break; + } - if (mast_overflow(mast)) continue; + } /* May be a new root stored in mast->bn */ if (mas_is_root_limits(mast->orig_l)) @@ -3103,8 +2710,9 @@ static int mas_spanning_rebalance(struct ma_state *mas, l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), mte_node_type(mast->orig_l->node)); - mast->orig_l->depth++; + mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true); + new_height++; mas_set_parent(mas, left, l_mas.node, slot); if (middle) mas_set_parent(mas, middle, l_mas.node, ++slot); @@ -3114,25 +2722,22 @@ static int mas_spanning_rebalance(struct ma_state *mas, if (mas_is_root_limits(mast->l)) { new_root: - mast_new_root(mast, mas); + mas_mn(mast->l)->parent = ma_parent_ptr(mas_tree_parent(mas)); + while (!mte_is_root(mast->orig_l->node)) + mast_ascend(mast); } else { mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent; } - if (!mte_dead_node(mast->orig_l->node)) - mat_add(&free, mast->orig_l->node); - - mas->depth = mast->orig_l->depth; - *mast->orig_l = l_mas; - mte_set_node_dead(mas->node); - - /* Set up mas for insertion. */ - mast->orig_l->depth = mas->depth; - mast->orig_l->alloc = mas->alloc; - *mas = *mast->orig_l; - mas_wmb_replace(mas, &free, &destroy); + old_enode = mast->orig_l->node; + mas->depth = l_mas.depth; + mas->node = l_mas.node; + mas->min = l_mas.min; + mas->max = l_mas.max; + mas->offset = l_mas.offset; + mas_wmb_replace(mas, old_enode, new_height); mtree_range_walk(mas); - return mast->bn->b_end; + return; } /* @@ -3142,10 +2747,8 @@ new_root: * * Rebalance two nodes into a single node or two new nodes that are sufficient. * Continue upwards until tree is sufficient. - * - * Return: the number of elements in b_node during the last loop. */ -static inline int mas_rebalance(struct ma_state *mas, +static inline void mas_rebalance(struct ma_state *mas, struct maple_big_node *b_node) { char empty_count = mas_mt_height(mas); @@ -3155,7 +2758,7 @@ static inline int mas_rebalance(struct ma_state *mas, MA_STATE(l_mas, mas->tree, mas->index, mas->last); MA_STATE(r_mas, mas->tree, mas->index, mas->last); - trace_ma_op(__func__, mas); + trace_ma_op(TP_FCT, mas); /* * Rebalancing occurs if a node is insufficient. Data is rebalanced @@ -3166,9 +2769,6 @@ static inline int mas_rebalance(struct ma_state *mas, * tries to combine the data in the same way. If one node contains the * entire range of the tree, then that node is used as a new root node. */ - mas_node_count(mas, 1 + empty_count * 3); - if (mas_is_err(mas)) - return 0; mast.orig_l = &l_mas; mast.orig_r = &r_mas; @@ -3194,134 +2794,12 @@ static inline int mas_rebalance(struct ma_state *mas, } /* - * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple - * state. - * @mas: The maple state - * @end: The end of the left-most node. - * - * During a mass-insert event (such as forking), it may be necessary to - * rebalance the left-most node when it is not sufficient. 
- */ -static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end) -{ - enum maple_type mt = mte_node_type(mas->node); - struct maple_node reuse, *newnode, *parent, *new_left, *left, *node; - struct maple_enode *eparent; - unsigned char offset, tmp, split = mt_slots[mt] / 2; - void __rcu **l_slots, **slots; - unsigned long *l_pivs, *pivs, gap; - bool in_rcu = mt_in_rcu(mas->tree); - - MA_STATE(l_mas, mas->tree, mas->index, mas->last); - - l_mas = *mas; - mas_prev_sibling(&l_mas); - - /* set up node. */ - if (in_rcu) { - /* Allocate for both left and right as well as parent. */ - mas_node_count(mas, 3); - if (mas_is_err(mas)) - return; - - newnode = mas_pop_node(mas); - } else { - newnode = &reuse; - } - - node = mas_mn(mas); - newnode->parent = node->parent; - slots = ma_slots(newnode, mt); - pivs = ma_pivots(newnode, mt); - left = mas_mn(&l_mas); - l_slots = ma_slots(left, mt); - l_pivs = ma_pivots(left, mt); - if (!l_slots[split]) - split++; - tmp = mas_data_end(&l_mas) - split; - - memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp); - memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp); - pivs[tmp] = l_mas.max; - memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end); - memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end); - - l_mas.max = l_pivs[split]; - mas->min = l_mas.max + 1; - eparent = mt_mk_node(mte_parent(l_mas.node), - mas_parent_type(&l_mas, l_mas.node)); - tmp += end; - if (!in_rcu) { - unsigned char max_p = mt_pivots[mt]; - unsigned char max_s = mt_slots[mt]; - - if (tmp < max_p) - memset(pivs + tmp, 0, - sizeof(unsigned long) * (max_p - tmp)); - - if (tmp < mt_slots[mt]) - memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp)); - - memcpy(node, newnode, sizeof(struct maple_node)); - ma_set_meta(node, mt, 0, tmp - 1); - mte_set_pivot(eparent, mte_parent_slot(l_mas.node), - l_pivs[split]); - - /* Remove data from l_pivs. */ - tmp = split + 1; - memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp)); - memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp)); - ma_set_meta(left, mt, 0, split); - - goto done; - } - - /* RCU requires replacing both l_mas, mas, and parent. */ - mas->node = mt_mk_node(newnode, mt); - ma_set_meta(newnode, mt, 0, tmp); - - new_left = mas_pop_node(mas); - new_left->parent = left->parent; - mt = mte_node_type(l_mas.node); - slots = ma_slots(new_left, mt); - pivs = ma_pivots(new_left, mt); - memcpy(slots, l_slots, sizeof(void *) * split); - memcpy(pivs, l_pivs, sizeof(unsigned long) * split); - ma_set_meta(new_left, mt, 0, split); - l_mas.node = mt_mk_node(new_left, mt); - - /* replace parent. */ - offset = mte_parent_slot(mas->node); - mt = mas_parent_type(&l_mas, l_mas.node); - parent = mas_pop_node(mas); - slots = ma_slots(parent, mt); - pivs = ma_pivots(parent, mt); - memcpy(parent, mte_to_node(eparent), sizeof(struct maple_node)); - rcu_assign_pointer(slots[offset], mas->node); - rcu_assign_pointer(slots[offset - 1], l_mas.node); - pivs[offset - 1] = l_mas.max; - eparent = mt_mk_node(parent, mt); -done: - gap = mas_leaf_max_gap(mas); - mte_set_gap(eparent, mte_parent_slot(mas->node), gap); - gap = mas_leaf_max_gap(&l_mas); - mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap); - mas_ascend(mas); - - if (in_rcu) - mas_replace(mas, false); - - mas_update_gap(mas); -} - -/* * mas_split_final_node() - Split the final node in a subtree operation. * @mast: the maple subtree state * @mas: The maple state - * @height: The height of the tree in case it's a new root. 
*/ -static inline bool mas_split_final_node(struct maple_subtree_state *mast, - struct ma_state *mas, int height) +static inline void mas_split_final_node(struct maple_subtree_state *mast, + struct ma_state *mas) { struct maple_enode *ancestor; @@ -3330,7 +2808,6 @@ static inline bool mas_split_final_node(struct maple_subtree_state *mast, mast->bn->type = maple_arange_64; else mast->bn->type = maple_range_64; - mas->depth = height; } /* * Only a single node is used here, could be root. @@ -3344,7 +2821,6 @@ static inline bool mas_split_final_node(struct maple_subtree_state *mast, mast->l->node = ancestor; mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true); mas->offset = mast->bn->b_end - 1; - return true; } /* @@ -3358,19 +2834,14 @@ static inline void mast_fill_bnode(struct maple_subtree_state *mast, unsigned char skip) { bool cp = true; - struct maple_enode *old = mas->node; unsigned char split; - memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap)); - memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot)); - memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot)); - mast->bn->b_end = 0; + memset(mast->bn, 0, sizeof(struct maple_big_node)); if (mte_is_root(mas->node)) { cp = false; } else { mas_ascend(mas); - mat_add(mast->free, old); mas->offset = mte_parent_slot(mas->node); } @@ -3424,7 +2895,6 @@ static inline void mast_split_data(struct maple_subtree_state *mast, * mas_push_data() - Instead of splitting a node, it is beneficial to push the * data to the right or left node if there is room. * @mas: The maple state - * @height: The current height of the maple state * @mast: The maple subtree state * @left: Push left or not. * @@ -3432,8 +2902,8 @@ static inline void mast_split_data(struct maple_subtree_state *mast, * * Return: True if pushed, false otherwise. */ -static inline bool mas_push_data(struct ma_state *mas, int height, - struct maple_subtree_state *mast, bool left) +static inline bool mas_push_data(struct ma_state *mas, + struct maple_subtree_state *mast, bool left) { unsigned char slot_total = mast->bn->b_end; unsigned char end, space, split; @@ -3474,13 +2944,11 @@ static inline bool mas_push_data(struct ma_state *mas, int height, split = mt_slots[mast->bn->type] - 2; if (left) { /* Switch mas to prev node */ - mat_add(mast->free, mas->node); *mas = tmp_mas; /* Start using mast->l for the left side. */ tmp_mas.node = mast->l->node; *mast->l = tmp_mas; } else { - mat_add(mast->free, tmp_mas.node); tmp_mas.node = mast->r->node; *mast->r = tmp_mas; split = slot_total - split; @@ -3492,7 +2960,7 @@ static inline bool mas_push_data(struct ma_state *mas, int height, mast_split_data(mast, mas, split); mast_fill_bnode(mast, mas, 2); - mas_split_final_node(mast, mas, height + 1); + mas_split_final_node(mast, mas); return true; } @@ -3500,13 +2968,14 @@ static inline bool mas_push_data(struct ma_state *mas, int height, * mas_split() - Split data that is too big for one node into two. * @mas: The maple state * @b_node: The maple big node - * Return: 1 on success, 0 on failure. 
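+ *
+ * A sketch of a workload that exercises this path - dense single-index
+ * stores overflow a leaf and push splits up the tree (loop shape
+ * illustrative only):
+ *
+ *	for (i = 0; i < 1000; i++)
+ *		if (mtree_store(mt, i, xa_mk_value(i), GFP_KERNEL))
+ *			break;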
*/ -static int mas_split(struct ma_state *mas, struct maple_big_node *b_node) +static void mas_split(struct ma_state *mas, struct maple_big_node *b_node) { struct maple_subtree_state mast; int height = 0; + unsigned int orig_height = mas_mt_height(mas); unsigned char mid_split, split = 0; + struct maple_enode *old; /* * Splitting is handled differently from any other B-tree; the Maple @@ -3529,25 +2998,18 @@ static int mas_split(struct ma_state *mas, struct maple_big_node *b_node) MA_STATE(r_mas, mas->tree, mas->index, mas->last); MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last); MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last); - MA_TOPIARY(mat, mas->tree); - trace_ma_op(__func__, mas); - mas->depth = mas_mt_height(mas); - /* Allocation failures will happen early. */ - mas_node_count(mas, 1 + mas->depth * 2); - if (mas_is_err(mas)) - return 0; + trace_ma_op(TP_FCT, mas); mast.l = &l_mas; mast.r = &r_mas; mast.orig_l = &prev_l_mas; mast.orig_r = &prev_r_mas; - mast.free = &mat; mast.bn = b_node; - while (height++ <= mas->depth) { + while (height++ <= orig_height) { if (mt_slots[b_node->type] > b_node->b_end) { - mas_split_final_node(&mast, mas, height); + mas_split_final_node(&mast, mas); break; } @@ -3562,14 +3024,17 @@ static int mas_split(struct ma_state *mas, struct maple_big_node *b_node) * is a significant savings. */ /* Try to push left. */ - if (mas_push_data(mas, height, &mast, true)) + if (mas_push_data(mas, &mast, true)) { + height++; break; - + } /* Try to push right. */ - if (mas_push_data(mas, height, &mast, false)) + if (mas_push_data(mas, &mast, false)) { + height++; break; + } - split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min); + split = mab_calc_split(mas, b_node, &mid_split); mast_split_data(&mast, mas, split); /* * Usually correct, mab_mas_cp in the above call overwrites @@ -3582,76 +3047,29 @@ static int mas_split(struct ma_state *mas, struct maple_big_node *b_node) } /* Set the original node as dead */ - mat_add(mast.free, mas->node); + old = mas->node; mas->node = l_mas.node; - mas_wmb_replace(mas, mast.free, NULL); + mas_wmb_replace(mas, old, height); mtree_range_walk(mas); - return 1; -} - -/* - * mas_reuse_node() - Reuse the node to store the data. - * @wr_mas: The maple write state - * @bn: The maple big node - * @end: The end of the data. - * - * Will always return false in RCU mode. - * - * Return: True if node was reused, false otherwise. - */ -static inline bool mas_reuse_node(struct ma_wr_state *wr_mas, - struct maple_big_node *bn, unsigned char end) -{ - /* Need to be rcu safe. */ - if (mt_in_rcu(wr_mas->mas->tree)) - return false; - - if (end > bn->b_end) { - int clear = mt_slots[wr_mas->type] - bn->b_end; - - memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--); - memset(wr_mas->pivots + bn->b_end, 0, sizeof(void *) * clear); - } - mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false); - return true; + return; } /* * mas_commit_b_node() - Commit the big node into the tree. * @wr_mas: The maple write state * @b_node: The maple big node - * @end: The end of the data. 
*/ -static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas, - struct maple_big_node *b_node, unsigned char end) +static noinline_for_kasan void mas_commit_b_node(struct ma_wr_state *wr_mas, + struct maple_big_node *b_node) { - struct maple_node *node; - unsigned char b_end = b_node->b_end; - enum maple_type b_type = b_node->type; - - if ((b_end < mt_min_slots[b_type]) && - (!mte_is_root(wr_mas->mas->node)) && - (mas_mt_height(wr_mas->mas) > 1)) - return mas_rebalance(wr_mas->mas, b_node); - - if (b_end >= mt_slots[b_type]) - return mas_split(wr_mas->mas, b_node); + enum store_type type = wr_mas->mas->store_type; - if (mas_reuse_node(wr_mas, b_node, end)) - goto reuse_node; + WARN_ON_ONCE(type != wr_rebalance && type != wr_split_store); - mas_node_count(wr_mas->mas, 1); - if (mas_is_err(wr_mas->mas)) - return 0; + if (type == wr_rebalance) + return mas_rebalance(wr_mas->mas, b_node); - node = mas_pop_node(wr_mas->mas); - node->parent = mas_mn(wr_mas->mas)->parent; - wr_mas->mas->node = mt_mk_node(node, b_type); - mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false); - mas_replace(wr_mas->mas, false); -reuse_node: - mas_update_gap(wr_mas->mas); - return 1; + return mas_split(wr_mas->mas, b_node); } /* @@ -3659,7 +3077,7 @@ reuse_node: * @mas: The maple state * @entry: The entry to store into the tree */ -static inline int mas_root_expand(struct ma_state *mas, void *entry) +static inline void mas_root_expand(struct ma_state *mas, void *entry) { void *contents = mas_root_locked(mas); enum maple_type type = maple_leaf_64; @@ -3668,16 +3086,12 @@ static inline int mas_root_expand(struct ma_state *mas, void *entry) unsigned long *pivots; int slot = 0; - mas_node_count(mas, 1); - if (unlikely(mas_is_err(mas))) - return 0; - node = mas_pop_node(mas); pivots = ma_pivots(node, type); slots = ma_slots(node, type); - node->parent = ma_parent_ptr( - ((unsigned long)mas->tree | MA_ROOT_PARENT)); + node->parent = ma_parent_ptr(mas_tree_parent(mas)); mas->node = mt_mk_node(node, type); + mas->status = ma_active; if (mas->index) { if (contents) { @@ -3694,33 +3108,40 @@ static inline int mas_root_expand(struct ma_state *mas, void *entry) if (mas->last != ULONG_MAX) pivots[++slot] = ULONG_MAX; - mas->depth = 1; - mas_set_height(mas); + mt_set_height(mas->tree, 1); ma_set_meta(node, maple_leaf_64, 0, slot); /* swap the new root into the tree */ rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); - return slot; + return; } +/* + * mas_store_root() - Storing value into root. + * @mas: The maple state + * @entry: The entry to store. + * + * There is no root node now and we are storing a value into the root - this + * function either assigns the pointer or expands into a node. + */ static inline void mas_store_root(struct ma_state *mas, void *entry) { - if (likely((mas->last != 0) || (mas->index != 0))) + if (!entry) { + if (!mas->index) + rcu_assign_pointer(mas->tree->ma_root, NULL); + } else if (likely((mas->last != 0) || (mas->index != 0))) mas_root_expand(mas, entry); else if (((unsigned long) (entry) & 3) == 2) mas_root_expand(mas, entry); else { rcu_assign_pointer(mas->tree->ma_root, entry); - mas->node = MAS_START; + mas->status = ma_start; } } /* * mas_is_span_wr() - Check if the write needs to be treated as a write that * spans the node. 
- * @mas: The maple state - * @piv: The pivot value being written - * @type: The maple node type - * @entry: The data to write + * @wr_mas: The maple write state * * Spanning writes are writes that start in one node and end in another OR if * the write of a %NULL will cause the node to end with a %NULL. @@ -3753,7 +3174,7 @@ static bool mas_is_span_wr(struct ma_wr_state *wr_mas) return false; } - trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry); + trace_ma_write(TP_FCT, wr_mas->mas, wr_mas->r_max, entry); return true; } @@ -3794,13 +3215,23 @@ static bool mas_wr_walk(struct ma_wr_state *wr_mas) if (ma_is_leaf(wr_mas->type)) return true; + if (mas->end < mt_slots[wr_mas->type] - 1) + wr_mas->vacant_height = mas->depth + 1; + + if (ma_is_root(mas_mn(mas))) { + /* root needs more than 2 entries to be sufficient + 1 */ + if (mas->end > 2) + wr_mas->sufficient_height = 1; + } else if (mas->end > mt_min_slots[wr_mas->type] + 1) + wr_mas->sufficient_height = mas->depth + 1; + mas_wr_walk_traverse(wr_mas); } return true; } -static bool mas_wr_walk_index(struct ma_wr_state *wr_mas) +static void mas_wr_walk_index(struct ma_wr_state *wr_mas) { struct ma_state *mas = wr_mas->mas; @@ -3809,11 +3240,9 @@ static bool mas_wr_walk_index(struct ma_wr_state *wr_mas) wr_mas->content = mas_slot_locked(mas, wr_mas->slots, mas->offset); if (ma_is_leaf(wr_mas->type)) - return true; + return; mas_wr_walk_traverse(wr_mas); - } - return true; } /* * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs. @@ -3887,23 +3316,17 @@ static inline void *mtree_lookup_walk(struct ma_state *mas) enum maple_type type; void __rcu **slots; unsigned char end; - unsigned long max; next = mas->node; - max = ULONG_MAX; do { - offset = 0; node = mte_to_node(next); type = mte_node_type(next); pivots = ma_pivots(node, type); - end = ma_data_end(node, type, pivots, max); - if (unlikely(ma_dead_node(node))) - goto dead_node; + end = mt_pivots[type]; + offset = 0; do { - if (pivots[offset] >= mas->index) { - max = pivots[offset]; + if (pivots[offset] >= mas->index) break; - } } while (++offset < end); slots = ma_slots(node, type); @@ -3919,6 +3342,7 @@ dead_node: return NULL; } +static void mte_destroy_walk(struct maple_enode *, struct maple_tree *); /* * mas_new_root() - Create a new root node that only contains the entry passed * in. @@ -3926,10 +3350,8 @@ dead_node: * @entry: The entry to store. * * Only valid when the index == 0 and the last == ULONG_MAX - * - * Return 0 on error, 1 on success. 
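+ *
+ * Both outcomes are reachable from the public interface; a sketch
+ * (ptr illustrative):
+ *
+ *	mtree_store_range(mt, 0, ULONG_MAX, ptr, GFP_KERNEL);
+ *	mtree_store_range(mt, 0, ULONG_MAX, NULL, GFP_KERNEL);
+ *
+ * The first call collapses the tree to a single root node holding one
+ * entry; the second empties it again (height 0, ma_root == NULL).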
*/ -static inline int mas_new_root(struct ma_state *mas, void *entry) +static inline void mas_new_root(struct ma_state *mas, void *entry) { struct maple_enode *root = mas_root_locked(mas); enum maple_type type = maple_leaf_64; @@ -3937,35 +3359,31 @@ static inline int mas_new_root(struct ma_state *mas, void *entry) void __rcu **slots; unsigned long *pivots; - if (!entry && !mas->index && mas->last == ULONG_MAX) { - mas->depth = 0; - mas_set_height(mas); + WARN_ON_ONCE(mas->index || mas->last != ULONG_MAX); + + if (!entry) { + mt_set_height(mas->tree, 0); rcu_assign_pointer(mas->tree->ma_root, entry); - mas->node = MAS_START; + mas->status = ma_start; goto done; } - mas_node_count(mas, 1); - if (mas_is_err(mas)) - return 0; - node = mas_pop_node(mas); pivots = ma_pivots(node, type); slots = ma_slots(node, type); - node->parent = ma_parent_ptr( - ((unsigned long)mas->tree | MA_ROOT_PARENT)); + node->parent = ma_parent_ptr(mas_tree_parent(mas)); mas->node = mt_mk_node(node, type); + mas->status = ma_active; rcu_assign_pointer(slots[0], entry); pivots[0] = mas->last; - mas->depth = 1; - mas_set_height(mas); + mt_set_height(mas->tree, 1); rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); done: if (xa_is_node(root)) mte_destroy_walk(root, mas->tree); - return 1; + return; } /* * mas_wr_spanning_store() - Create a subtree with the store operation completed @@ -3973,10 +3391,8 @@ done: * Note that mas is expected to point to the node which caused the store to * span. * @wr_mas: The maple write state - * - * Return: 0 on error, positive on success. */ -static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas) +static noinline void mas_wr_spanning_store(struct ma_wr_state *wr_mas) { struct maple_subtree_state mast; struct maple_big_node b_node; @@ -3986,7 +3402,6 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas) /* Left and Right side of spanning store */ MA_STATE(l_mas, NULL, 0, 0); MA_STATE(r_mas, NULL, 0, 0); - MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry); MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry); @@ -4003,7 +3418,7 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas) * of data may happen. */ mas = wr_mas->mas; - trace_ma_op(__func__, mas); + trace_ma_op(TP_FCT, mas); if (unlikely(!mas->index && mas->last == ULONG_MAX)) return mas_new_root(mas, wr_mas->entry); @@ -4012,9 +3427,6 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas) * entries per level plus a new root. */ height = mas_mt_height(mas); - mas_node_count(mas, 1 + height * 3); - if (mas_is_err(mas)) - return 0; /* * Set up right side. Need to get to the next offset after the spanning @@ -4049,10 +3461,10 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas) memset(&b_node, 0, sizeof(struct maple_big_node)); /* Copy l_mas and store the value in b_node. */ - mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end); - /* Copy r_mas into b_node. */ - if (r_mas.offset <= r_wr_mas.node_end) - mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end, + mas_store_b_node(&l_wr_mas, &b_node, l_mas.end); + /* Copy r_mas into b_node if there is anything to copy. */ + if (r_mas.max > r_mas.last) + mas_mab_cp(&r_mas, r_mas.offset, r_mas.end, &b_node, b_node.b_end + 1); else b_node.b_end++; @@ -4072,10 +3484,8 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas) * @wr_mas: The maple write state * * Attempts to reuse the node, but may allocate. 
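+ *
+ * From the public interface this path is typically reached by rewriting
+ * the middle of an existing range, which re-slots one leaf in place
+ * (occupancy permitting) without changing the tree shape; a sketch:
+ *
+ *	mtree_store_range(mt, 0, 99, a, GFP_KERNEL);
+ *	mtree_store_range(mt, 40, 59, b, GFP_KERNEL);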
- * - * Return: True if stored, false otherwise */ -static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas, +static inline void mas_wr_node_store(struct ma_wr_state *wr_mas, unsigned char new_end) { struct ma_state *mas = wr_mas->mas; @@ -4085,23 +3495,13 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas, struct maple_node reuse, *newnode; unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type]; bool in_rcu = mt_in_rcu(mas->tree); - - /* Check if there is enough data. The room is enough. */ - if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) && - !(mas->mas_flags & MA_STATE_BULK)) - return false; + unsigned char height = mas_mt_height(mas); if (mas->last == wr_mas->end_piv) offset_end++; /* don't copy this offset */ - else if (unlikely(wr_mas->r_max == ULONG_MAX)) - mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type); /* set up node. */ if (in_rcu) { - mas_node_count(mas, 1); - if (mas_is_err(mas)) - return false; - newnode = mas_pop_node(mas); } else { memset(&reuse, 0, sizeof(struct maple_node)); @@ -4130,12 +3530,12 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas, * this range wrote to the end of the node or it overwrote the rest of * the data */ - if (offset_end > wr_mas->node_end) + if (offset_end > mas->end) goto done; dst_offset = mas->offset + 1; /* Copy to the end of node if necessary. */ - copy_size = wr_mas->node_end - offset_end + 1; + copy_size = mas->end - offset_end + 1; memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end, sizeof(void *) * copy_size); memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end, @@ -4145,49 +3545,60 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas, dst_pivots[new_end] = mas->max; done: - mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end); + mas_leaf_set_meta(newnode, maple_leaf_64, new_end); if (in_rcu) { - mte_set_node_dead(mas->node); + struct maple_enode *old_enode = mas->node; + mas->node = mt_mk_node(newnode, wr_mas->type); - mas_replace(mas, false); + mas_replace_node(mas, old_enode, height); } else { memcpy(wr_mas->node, newnode, sizeof(struct maple_node)); } - trace_ma_write(__func__, mas, 0, wr_mas->entry); + trace_ma_write(TP_FCT, mas, 0, wr_mas->entry); mas_update_gap(mas); - return true; + mas->end = new_end; + return; } /* * mas_wr_slot_store: Attempt to store a value in a slot. * @wr_mas: the maple write state - * - * Return: True if stored, false otherwise */ -static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas) +static inline void mas_wr_slot_store(struct ma_wr_state *wr_mas) { struct ma_state *mas = wr_mas->mas; unsigned char offset = mas->offset; + void __rcu **slots = wr_mas->slots; bool gap = false; - if (wr_mas->offset_end - offset != 1) - return false; - - gap |= !mt_slot_locked(mas->tree, wr_mas->slots, offset); - gap |= !mt_slot_locked(mas->tree, wr_mas->slots, offset + 1); + gap |= !mt_slot_locked(mas->tree, slots, offset); + gap |= !mt_slot_locked(mas->tree, slots, offset + 1); - if (mas->index == wr_mas->r_min) { - /* Overwriting the range and over a part of the next range. 
*/ - rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry); - wr_mas->pivots[offset] = mas->last; + if (wr_mas->offset_end - offset == 1) { + if (mas->index == wr_mas->r_min) { + /* Overwriting the range and a part of the next one */ + rcu_assign_pointer(slots[offset], wr_mas->entry); + wr_mas->pivots[offset] = mas->last; + } else { + /* Overwriting a part of the range and the next one */ + rcu_assign_pointer(slots[offset + 1], wr_mas->entry); + wr_mas->pivots[offset] = mas->index - 1; + mas->offset++; /* Keep mas accurate. */ + } } else { - /* Overwriting a part of the range and over the next range */ - rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry); + WARN_ON_ONCE(mt_in_rcu(mas->tree)); + /* + * Expand the range, only partially overwriting the previous and + * next ranges + */ + gap |= !mt_slot_locked(mas->tree, slots, offset + 2); + rcu_assign_pointer(slots[offset + 1], wr_mas->entry); wr_mas->pivots[offset] = mas->index - 1; + wr_mas->pivots[offset + 1] = mas->last; mas->offset++; /* Keep mas accurate. */ } - trace_ma_write(__func__, mas, 0, wr_mas->entry); + trace_ma_write(TP_FCT, mas, 0, wr_mas->entry); /* * Only update gap when the new entry is empty or there is an empty * entry in the original two ranges. @@ -4195,19 +3606,7 @@ static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas) if (!wr_mas->entry || gap) mas_update_gap(mas); - return true; -} - -static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas) -{ - while ((wr_mas->offset_end < wr_mas->node_end) && - (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end])) - wr_mas->offset_end++; - - if (wr_mas->offset_end < wr_mas->node_end) - wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end]; - else - wr_mas->end_piv = wr_mas->mas->max; + return; } static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas) @@ -4220,10 +3619,10 @@ static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas) } else { /* Check next slot(s) if we are overwriting the end */ if ((mas->last == wr_mas->end_piv) && - (wr_mas->node_end != wr_mas->offset_end) && + (mas->end != wr_mas->offset_end) && !wr_mas->slots[wr_mas->offset_end + 1]) { wr_mas->offset_end++; - if (wr_mas->offset_end == wr_mas->node_end) + if (wr_mas->offset_end == mas->end) mas->last = mas->max; else mas->last = wr_mas->pivots[wr_mas->offset_end]; @@ -4246,10 +3645,22 @@ static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas) } } +static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas) +{ + while ((wr_mas->offset_end < wr_mas->mas->end) && + (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end])) + wr_mas->offset_end++; + + if (wr_mas->offset_end < wr_mas->mas->end) + wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end]; + else + wr_mas->end_piv = wr_mas->mas->max; +} + static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas) { struct ma_state *mas = wr_mas->mas; - unsigned char new_end = wr_mas->node_end + 2; + unsigned char new_end = mas->end + 2; new_end -= wr_mas->offset_end - mas->offset; if (wr_mas->r_min == mas->index) @@ -4264,40 +3675,52 @@ static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas) /* * mas_wr_append: Attempt to append * @wr_mas: the maple write state + * @new_end: The end of the node after the modification * - * Return: True if appended, false otherwise + * This is currently unsafe in rcu mode since the end of the node may be cached + * by readers while the node contents may be updated which could result in + * inaccurate information. 
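+ *
+ * A sketch of the write pattern that benefits, on a locked non-RCU tree
+ * (error handling elided; last being the current highest index in use):
+ *
+ *	MA_STATE(mas, mt, 0, 0);
+ *
+ *	mtree_lock(mt);
+ *	mas_set_range(&mas, last + 1, last + 10);
+ *	mas_store_gfp(&mas, entry, GFP_KERNEL);
+ *	mtree_unlock(mt);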
*/ -static inline bool mas_wr_append(struct ma_wr_state *wr_mas) +static inline void mas_wr_append(struct ma_wr_state *wr_mas, + unsigned char new_end) { - unsigned char end = wr_mas->node_end; - unsigned char new_end = end + 1; struct ma_state *mas = wr_mas->mas; - unsigned char node_pivots = mt_pivots[wr_mas->type]; - - if (mas->offset != wr_mas->node_end) - return false; + void __rcu **slots; + unsigned char end = mas->end; - if (new_end < node_pivots) { + if (new_end < mt_pivots[wr_mas->type]) { wr_mas->pivots[new_end] = wr_mas->pivots[end]; - ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end); + ma_set_meta(wr_mas->node, wr_mas->type, 0, new_end); } - if (mas->last == wr_mas->r_max) { - /* Append to end of range */ - rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->entry); - wr_mas->pivots[end] = mas->index - 1; - mas->offset = new_end; + slots = wr_mas->slots; + if (new_end == end + 1) { + if (mas->last == wr_mas->r_max) { + /* Append to end of range */ + rcu_assign_pointer(slots[new_end], wr_mas->entry); + wr_mas->pivots[end] = mas->index - 1; + mas->offset = new_end; + } else { + /* Append to start of range */ + rcu_assign_pointer(slots[new_end], wr_mas->content); + wr_mas->pivots[end] = mas->last; + rcu_assign_pointer(slots[end], wr_mas->entry); + } } else { - /* Append to start of range */ - rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content); - wr_mas->pivots[end] = mas->last; - rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry); + /* Append to the range without touching any boundaries. */ + rcu_assign_pointer(slots[new_end], wr_mas->content); + wr_mas->pivots[end + 1] = mas->last; + rcu_assign_pointer(slots[end + 1], wr_mas->entry); + wr_mas->pivots[end] = mas->index - 1; + mas->offset = end + 1; } if (!wr_mas->content || !wr_mas->entry) mas_update_gap(mas); - return true; + mas->end = new_end; + trace_ma_write(TP_FCT, mas, new_end, wr_mas->entry); + return; } /* @@ -4310,86 +3733,223 @@ static void mas_wr_bnode(struct ma_wr_state *wr_mas) { struct maple_big_node b_node; - trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry); + trace_ma_write(TP_FCT, wr_mas->mas, 0, wr_mas->entry); memset(&b_node, 0, sizeof(struct maple_big_node)); mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end); - mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end); + mas_commit_b_node(wr_mas, &b_node); } -static inline void mas_wr_modify(struct ma_wr_state *wr_mas) +/* + * mas_wr_store_entry() - Internal call to store a value + * @wr_mas: The maple write state + */ +static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas) { struct ma_state *mas = wr_mas->mas; - unsigned char new_end; + unsigned char new_end = mas_wr_new_end(wr_mas); - /* Direct replacement */ - if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) { + switch (mas->store_type) { + case wr_exact_fit: rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry); if (!!wr_mas->entry ^ !!wr_mas->content) mas_update_gap(mas); - return; + break; + case wr_append: + mas_wr_append(wr_mas, new_end); + break; + case wr_slot_store: + mas_wr_slot_store(wr_mas); + break; + case wr_node_store: + mas_wr_node_store(wr_mas, new_end); + break; + case wr_spanning_store: + mas_wr_spanning_store(wr_mas); + break; + case wr_split_store: + case wr_rebalance: + mas_wr_bnode(wr_mas); + break; + case wr_new_root: + mas_new_root(mas, wr_mas->entry); + break; + case wr_store_root: + mas_store_root(mas, wr_mas->entry); + break; + case wr_invalid: + MT_BUG_ON(mas->tree, 1); + } + + return; +} + +static inline void 
mas_wr_prealloc_setup(struct ma_wr_state *wr_mas)
+{
+	struct ma_state *mas = wr_mas->mas;
+
+	if (!mas_is_active(mas)) {
+		if (mas_is_start(mas))
+			goto set_content;
+
+		if (unlikely(mas_is_paused(mas)))
+			goto reset;
+
+		if (unlikely(mas_is_none(mas)))
+			goto reset;
+
+		if (unlikely(mas_is_overflow(mas)))
+			goto reset;
+
+		if (unlikely(mas_is_underflow(mas)))
+			goto reset;
 	}
 
 	/*
-	 * new_end exceeds the size of the maple node and cannot enter the fast
-	 * path.
+	 * A less strict version of mas_is_span_wr() where we allow spanning
+	 * writes within this node. This is to stop partial walks in
+	 * mas_prealloc() from being reset.
 	 */
-	new_end = mas_wr_new_end(wr_mas);
-	if (new_end >= mt_slots[wr_mas->type])
-		goto slow_path;
+	if (mas->last > mas->max)
+		goto reset;
 
-	/* Attempt to append */
-	if (new_end == wr_mas->node_end + 1 && mas_wr_append(wr_mas))
-		return;
+	if (wr_mas->entry)
+		goto set_content;
 
-	if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas))
-		return;
+	if (mte_is_leaf(mas->node) && mas->last == mas->max)
+		goto reset;
 
-	if (mas_wr_node_store(wr_mas, new_end))
-		return;
+	goto set_content;
 
-	if (mas_is_err(mas))
-		return;
+reset:
+	mas_reset(mas);
+set_content:
+	wr_mas->content = mas_start(mas);
+}
+
+/**
+ * mas_prealloc_calc() - Calculate number of nodes needed for a
+ * given store operation
+ * @wr_mas: The maple write state
+ * @entry: The entry to store into the tree
+ *
+ * The node count required for the operation is stored in
+ * @wr_mas->mas->node_request.
+ */
+static inline void mas_prealloc_calc(struct ma_wr_state *wr_mas, void *entry)
+{
+	struct ma_state *mas = wr_mas->mas;
+	unsigned char height = mas_mt_height(mas);
+	int ret = height * 3 + 1;
+	unsigned char delta = height - wr_mas->vacant_height;
+
+	switch (mas->store_type) {
+	case wr_exact_fit:
+	case wr_append:
+	case wr_slot_store:
+		ret = 0;
+		break;
+	case wr_spanning_store:
+		if (wr_mas->sufficient_height < wr_mas->vacant_height)
+			ret = (height - wr_mas->sufficient_height) * 3 + 1;
+		else
+			ret = delta * 3 + 1;
+		break;
+	case wr_split_store:
+		ret = delta * 2 + 1;
+		break;
+	case wr_rebalance:
+		if (wr_mas->sufficient_height < wr_mas->vacant_height)
+			ret = (height - wr_mas->sufficient_height) * 2 + 1;
+		else
+			ret = delta * 2 + 1;
+		break;
+	case wr_node_store:
+		ret = mt_in_rcu(mas->tree) ? 1 : 0;
+		break;
+	case wr_new_root:
+		ret = 1;
+		break;
+	case wr_store_root:
+		if (likely((mas->last != 0) || (mas->index != 0)))
+			ret = 1;
+		else if (((unsigned long) (entry) & 3) == 2)
+			ret = 1;
+		else
+			ret = 0;
+		break;
+	case wr_invalid:
+		WARN_ON_ONCE(1);
+	}
 
-slow_path:
-	mas_wr_bnode(wr_mas);
+	mas->node_request = ret;
 }
 
 /*
- * mas_wr_store_entry() - Internal call to store a value
- * @mas: The maple state
- * @entry: The entry to store.
+ * mas_wr_store_type() - Determine the store type for a given
+ * store operation.
+ * @wr_mas: The maple write state
  *
- * Return: The contents that was stored at the index.
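+ *
+ * For intuition, with a single existing entry at 0-99 these public
+ * stores map roughly as follows (approximate - the final choice also
+ * depends on node occupancy, as computed during the walk):
+ *
+ *	mtree_store_range(mt, 0, 99, p, GFP_KERNEL);	wr_exact_fit
+ *	mtree_store_range(mt, 40, 59, p, GFP_KERNEL);	wr_node_store
+ *	mtree_store_range(mt, 0, 150, p, GFP_KERNEL);	wr_spanning_store
+ *							(if 150 lies in
+ *							the next node)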
+ * Return: the type of store needed for the operation */ -static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas) +static inline enum store_type mas_wr_store_type(struct ma_wr_state *wr_mas) { struct ma_state *mas = wr_mas->mas; + unsigned char new_end; - wr_mas->content = mas_start(mas); - if (mas_is_none(mas) || mas_is_ptr(mas)) { - mas_store_root(mas, wr_mas->entry); - return wr_mas->content; - } + if (unlikely(mas_is_none(mas) || mas_is_ptr(mas))) + return wr_store_root; - if (unlikely(!mas_wr_walk(wr_mas))) { - mas_wr_spanning_store(wr_mas); - return wr_mas->content; - } + if (unlikely(!mas_wr_walk(wr_mas))) + return wr_spanning_store; /* At this point, we are at the leaf node that needs to be altered. */ mas_wr_end_piv(wr_mas); - if (!wr_mas->entry) mas_wr_extend_null(wr_mas); - /* New root for a single pointer */ - if (unlikely(!mas->index && mas->last == ULONG_MAX)) { - mas_new_root(mas, wr_mas->entry); - return wr_mas->content; + if ((wr_mas->r_min == mas->index) && (wr_mas->r_max == mas->last)) + return wr_exact_fit; + + if (unlikely(!mas->index && mas->last == ULONG_MAX)) + return wr_new_root; + + new_end = mas_wr_new_end(wr_mas); + /* Potential spanning rebalance collapsing a node */ + if (new_end < mt_min_slots[wr_mas->type]) { + if (!mte_is_root(mas->node)) + return wr_rebalance; + return wr_node_store; } - mas_wr_modify(wr_mas); - return wr_mas->content; + if (new_end >= mt_slots[wr_mas->type]) + return wr_split_store; + + if (!mt_in_rcu(mas->tree) && (mas->offset == mas->end)) + return wr_append; + + if ((new_end == mas->end) && (!mt_in_rcu(mas->tree) || + (wr_mas->offset_end - mas->offset == 1))) + return wr_slot_store; + + return wr_node_store; +} + +/** + * mas_wr_preallocate() - Preallocate enough nodes for a store operation + * @wr_mas: The maple write state + * @entry: The entry that will be stored + * + */ +static inline void mas_wr_preallocate(struct ma_wr_state *wr_mas, void *entry) +{ + struct ma_state *mas = wr_mas->mas; + + mas_wr_prealloc_setup(wr_mas); + mas->store_type = mas_wr_store_type(wr_mas); + mas_prealloc_calc(wr_mas, entry); + if (!mas->node_request) + return; + + mas_alloc_nodes(mas, GFP_NOWAIT); } /** @@ -4422,26 +3982,24 @@ static inline void *mas_insert(struct ma_state *mas, void *entry) if (wr_mas.content) goto exists; - if (mas_is_none(mas) || mas_is_ptr(mas)) { - mas_store_root(mas, entry); + mas_wr_preallocate(&wr_mas, entry); + if (mas_is_err(mas)) return NULL; - } /* spanning writes always overwrite something */ - if (!mas_wr_walk(&wr_mas)) + if (mas->store_type == wr_spanning_store) goto exists; /* At this point, we are at the leaf node that needs to be altered. */ - wr_mas.offset_end = mas->offset; - wr_mas.end_piv = wr_mas.r_max; - - if (wr_mas.content || (mas->last > wr_mas.r_max)) - goto exists; + if (mas->store_type != wr_new_root && mas->store_type != wr_store_root) { + wr_mas.offset_end = mas->offset; + wr_mas.end_piv = wr_mas.r_max; - if (!entry) - return NULL; + if (wr_mas.content || (mas->last > wr_mas.r_max)) + goto exists; + } - mas_wr_modify(&wr_mas); + mas_wr_store_entry(&wr_mas); return wr_mas.content; exists: @@ -4450,7 +4008,59 @@ exists: } -static inline void mas_rewalk(struct ma_state *mas, unsigned long index) +/** + * mas_alloc_cyclic() - Internal call to find somewhere to store an entry + * @mas: The maple state. + * @startp: Pointer to ID. + * @range_lo: Lower bound of range to search. + * @range_hi: Upper bound of range to search. + * @entry: The entry to store. + * @next: Pointer to next ID to allocate. 
+ * @gfp: The GFP_FLAGS to use for allocations. + * + * Return: 0 if the allocation succeeded without wrapping, 1 if the + * allocation succeeded after wrapping, or -EBUSY if there are no + * free entries. + */ +int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp, + void *entry, unsigned long range_lo, unsigned long range_hi, + unsigned long *next, gfp_t gfp) +{ + unsigned long min = range_lo; + int ret = 0; + + range_lo = max(min, *next); + ret = mas_empty_area(mas, range_lo, range_hi, 1); + if ((mas->tree->ma_flags & MT_FLAGS_ALLOC_WRAPPED) && ret == 0) { + mas->tree->ma_flags &= ~MT_FLAGS_ALLOC_WRAPPED; + ret = 1; + } + if (ret < 0 && range_lo > min) { + mas_reset(mas); + ret = mas_empty_area(mas, min, range_hi, 1); + if (ret == 0) + ret = 1; + } + if (ret < 0) + return ret; + + do { + mas_insert(mas, entry); + } while (mas_nomem(mas, gfp)); + if (mas_is_err(mas)) + return xa_err(mas->node); + + *startp = mas->index; + *next = *startp + 1; + if (*next == 0) + mas->tree->ma_flags |= MT_FLAGS_ALLOC_WRAPPED; + + mas_destroy(mas); + return ret; +} +EXPORT_SYMBOL(mas_alloc_cyclic); + +static __always_inline void mas_rewalk(struct ma_state *mas, unsigned long index) { retry: mas_set(mas, index); @@ -4459,7 +4069,7 @@ retry: goto retry; } -static inline bool mas_rewalk_if_dead(struct ma_state *mas, +static __always_inline bool mas_rewalk_if_dead(struct ma_state *mas, struct maple_node *node, const unsigned long index) { if (unlikely(ma_dead_node(node))) { @@ -4471,14 +4081,16 @@ static inline bool mas_rewalk_if_dead(struct ma_state *mas, /* * mas_prev_node() - Find the prev non-null entry at the same level in the - * tree. The prev value will be mas->node[mas->offset] or MAS_NONE. + * tree. The prev value will be mas->node[mas->offset] or the status will be + * ma_none. * @mas: The maple state * @min: The lower limit to search * - * The prev node value will be mas->node[mas->offset] or MAS_NONE. + * The prev node value will be mas->node[mas->offset] or the status will be + * ma_none. * Return: 1 if the node is dead, 0 otherwise. 
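+ *
+ * This is the node-level step behind reverse iteration; the
+ * user-visible pattern is simply (do_something() being a stand-in,
+ * run under the tree lock or rcu_read_lock()):
+ *
+ *	mas_set(&mas, ULONG_MAX);
+ *	while ((entry = mas_prev(&mas, 0)) != NULL)
+ *		do_something(entry, mas.index, mas.last);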
*/ -static inline int mas_prev_node(struct ma_state *mas, unsigned long min) +static int mas_prev_node(struct ma_state *mas, unsigned long min) { enum maple_type mt; int offset, level; @@ -4538,13 +4150,14 @@ static inline int mas_prev_node(struct ma_state *mas, unsigned long min) if (unlikely(mte_dead_node(mas->node))) return 1; + mas->end = mas->offset; return 0; no_entry: if (unlikely(ma_dead_node(node))) return 1; - mas->node = MAS_NONE; + mas->status = ma_underflow; return 0; } @@ -4552,7 +4165,8 @@ no_entry: * mas_prev_slot() - Get the entry in the previous slot * * @mas: The maple state - * @max: The minimum starting range + * @min: The minimum starting range + * @empty: Can be empty * * Return: The entry in the previous slot which is possibly NULL */ @@ -4573,7 +4187,6 @@ retry: if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) goto retry; -again: if (mas->min <= min) { pivot = mas_safe_min(mas, pivots, mas->offset); @@ -4581,20 +4194,24 @@ again: goto retry; if (pivot <= min) - return NULL; + goto underflow; } +again: if (likely(mas->offset)) { mas->offset--; mas->last = mas->index - 1; mas->index = mas_safe_min(mas, pivots, mas->offset); } else { + if (mas->index <= min) + goto underflow; + if (mas_prev_node(mas, min)) { mas_rewalk(mas, save_point); goto retry; } - if (mas_is_none(mas)) + if (WARN_ON_ONCE(mas_is_underflow(mas))) return NULL; mas->last = mas->max; @@ -4612,39 +4229,50 @@ again: if (likely(entry)) return entry; - if (!empty) + if (!empty) { + if (mas->index <= min) + goto underflow; + goto again; + } return entry; + +underflow: + mas->status = ma_underflow; + return NULL; } /* * mas_next_node() - Get the next node at the same level in the tree. * @mas: The maple state + * @node: The maple node * @max: The maximum pivot value to check. * - * The next value will be mas->node[mas->offset] or MAS_NONE. + * The next value will be mas->node[mas->offset] or the status will have + * overflowed. * Return: 1 on dead node, 0 otherwise. */ -static inline int mas_next_node(struct ma_state *mas, struct maple_node *node, - unsigned long max) +static int mas_next_node(struct ma_state *mas, struct maple_node *node, + unsigned long max) { unsigned long min; unsigned long *pivots; struct maple_enode *enode; + struct maple_node *tmp; int level = 0; unsigned char node_end; enum maple_type mt; void __rcu **slots; if (mas->max >= max) - goto no_entry; + goto overflow; min = mas->max + 1; level = 0; do { if (ma_is_root(node)) - goto no_entry; + goto overflow; /* Walk up. 
*/ if (unlikely(mas_ascend(mas))) @@ -4684,6 +4312,10 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node, pivots = ma_pivots(node, mt); mas->max = mas_safe_pivot(mas, pivots, mas->offset, mt); + tmp = mte_to_node(enode); + mt = mte_node_type(enode); + pivots = ma_pivots(tmp, mt); + mas->end = ma_data_end(tmp, mt, pivots, mas->max); if (unlikely(ma_dead_node(node))) return 1; @@ -4691,11 +4323,11 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node, mas->min = min; return 0; -no_entry: +overflow: if (unlikely(ma_dead_node(node))) return 1; - mas->node = MAS_NONE; + mas->status = ma_overflow; return 0; } @@ -4715,7 +4347,6 @@ static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty) unsigned long pivot; enum maple_type type; struct maple_node *node; - unsigned char data_end; unsigned long save_point = mas->last; void *entry; @@ -4723,38 +4354,44 @@ retry: node = mas_mn(mas); type = mte_node_type(mas->node); pivots = ma_pivots(node, type); - data_end = ma_data_end(node, type, pivots, mas->max); if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) goto retry; -again: if (mas->max >= max) { - if (likely(mas->offset < data_end)) + if (likely(mas->offset < mas->end)) pivot = pivots[mas->offset]; else - return NULL; /* must be mas->max */ + pivot = mas->max; if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) goto retry; - if (pivot >= max) + if (pivot >= max) { /* Was at the limit, next will extend beyond */ + mas->status = ma_overflow; return NULL; + } } - if (likely(mas->offset < data_end)) { + if (likely(mas->offset < mas->end)) { mas->index = pivots[mas->offset] + 1; +again: mas->offset++; - if (likely(mas->offset < data_end)) + if (likely(mas->offset < mas->end)) mas->last = pivots[mas->offset]; else mas->last = mas->max; } else { + if (mas->last >= max) { + mas->status = ma_overflow; + return NULL; + } + if (mas_next_node(mas, node, max)) { mas_rewalk(mas, save_point); goto retry; } - if (mas_is_none(mas)) + if (WARN_ON_ONCE(mas_is_overflow(mas))) return NULL; mas->offset = 0; @@ -4773,9 +4410,14 @@ again: if (entry) return entry; + if (!empty) { - if (!mas->offset) - data_end = 2; + if (mas->last >= max) { + mas->status = ma_overflow; + return NULL; + } + + mas->index = mas->last + 1; goto again; } @@ -4783,26 +4425,6 @@ again: } /* - * mas_next_entry() - Internal function to get the next entry. - * @mas: The maple state - * @limit: The maximum range start. - * - * Set the @mas->node to the next entry and the range_start to - * the beginning value for the entry. Does not check beyond @limit. - * Sets @mas->index and @mas->last to the limit if it is hit. - * Restarts on dead nodes. - * - * Return: the next entry or %NULL. - */ -static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit) -{ - if (mas->last >= limit) - return NULL; - - return mas_next_slot(mas, limit, false); -} - -/* * mas_rev_awalk() - Internal function. Reverse allocation walk. Find the * highest gap address of a given size in a given node and descend. 
* @mas: The maple state @@ -4921,7 +4543,7 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size) min = mas_safe_min(mas, pivots, offset); data_end = ma_data_end(node, type, pivots, mas->max); for (; offset <= data_end; offset++) { - pivot = mas_logical_pivot(mas, pivots, offset, type); + pivot = mas_safe_pivot(mas, pivots, offset, type); /* Not within lower bounds */ if (mas->index > pivot) @@ -4937,15 +4559,14 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size) if (gap >= size) { if (ma_is_leaf(type)) { found = true; - goto done; - } - if (mas->index <= pivot) { - mas->node = mas_slot(mas, slots, offset); - mas->min = min; - mas->max = pivot; - offset = 0; break; } + + mas->node = mas_slot(mas, slots, offset); + mas->min = min; + mas->max = pivot; + offset = 0; + break; } next_slot: min = pivot + 1; @@ -4955,9 +4576,6 @@ next_slot: } } - if (mte_is_root(mas->node)) - found = true; -done: mas->offset = offset; return found; } @@ -4967,7 +4585,7 @@ done: * @mas: The maple state. * * mas->index and mas->last will be set to the range if there is a value. If - * mas->node is MAS_NONE, reset to MAS_START. + * mas->status is ma_none, reset to ma_start * * Return: the entry at the location or %NULL. */ @@ -4975,8 +4593,8 @@ void *mas_walk(struct ma_state *mas) { void *entry; - if (mas_is_none(mas) || mas_is_paused(mas) || mas_is_ptr(mas)) - mas->node = MAS_START; + if (!mas_is_active(mas) && !mas_is_start(mas)) + mas->status = ma_start; retry: entry = mas_state_walk(mas); if (mas_is_start(mas)) { @@ -4992,7 +4610,7 @@ retry: mas->index = 1; mas->last = ULONG_MAX; - mas->node = MAS_NONE; + mas->status = ma_none; return NULL; } @@ -5061,8 +4679,8 @@ static inline void mas_awalk(struct ma_state *mas, unsigned long size) * There are 4 options: * go to child (descend) * go back to parent (ascend) - * no gap found. (return, slot == MAPLE_NODE_SLOTS) - * found the gap. (return, slot != MAPLE_NODE_SLOTS) + * no gap found. (return, error == -EBUSY) + * found the gap. (return) */ while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) { if (last == mas->node) @@ -5119,6 +4737,7 @@ int mas_empty_area(struct ma_state *mas, unsigned long min, unsigned char offset; unsigned long *pivots; enum maple_type mt; + struct maple_node *node; if (min > max) return -EINVAL; @@ -5146,15 +4765,14 @@ int mas_empty_area(struct ma_state *mas, unsigned long min, return xa_err(mas->node); offset = mas->offset; - if (unlikely(offset == MAPLE_NODE_SLOTS)) - return -EBUSY; - + node = mas_mn(mas); mt = mte_node_type(mas->node); - pivots = ma_pivots(mas_mn(mas), mt); + pivots = ma_pivots(node, mt); min = mas_safe_min(mas, pivots, offset); if (mas->index < min) mas->index = min; mas->last = mas->index + size - 1; + mas->end = ma_data_end(node, mt, pivots, mas->max); return 0; } EXPORT_SYMBOL_GPL(mas_empty_area); @@ -5178,18 +4796,18 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, if (size == 0 || max - min < size - 1) return -EINVAL; - if (mas_is_start(mas)) { + if (mas_is_start(mas)) mas_start(mas); - mas->offset = mas_data_end(mas); - } else if (mas->offset >= 2) { - mas->offset -= 2; - } else if (!mas_rewind_node(mas)) { + else if ((mas->offset < 2) && (!mas_rewind_node(mas))) return -EBUSY; - } - /* Empty set. 
*/ - if (mas_is_none(mas) || mas_is_ptr(mas)) + if (unlikely(mas_is_none(mas) || mas_is_ptr(mas))) return mas_sparse_area(mas, min, max, size, false); + else if (mas->offset >= 2) + mas->offset -= 2; + else + mas->offset = mas_data_end(mas); + /* The start of the window can only be within these values. */ mas->index = min; @@ -5215,15 +4833,16 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, mas->last = max; mas->index = mas->last - size + 1; + mas->end = mas_data_end(mas); return 0; } EXPORT_SYMBOL_GPL(mas_empty_area_rev); /* * mte_dead_leaves() - Mark all leaves of a node as dead. - * @mas: The maple state + * @enode: the encoded node + * @mt: the maple tree * @slots: Pointer to the slot array - * @type: The maple node type * * Must hold the write lock. * * @@ -5322,7 +4941,7 @@ static void mt_free_walk(struct rcu_head *head) mt_free_bulk(node->slot_len, slots); free_leaf: - mt_free_rcu(&node->rcu); + kfree(node); } static inline void __rcu **mte_destroy_descend(struct maple_enode **enode, @@ -5363,6 +4982,7 @@ static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt, struct maple_enode *start; if (mte_is_leaf(enode)) { + mte_set_node_dead(enode); node->type = mte_node_type(enode); goto free_leaf; } @@ -5405,7 +5025,7 @@ next: free_leaf: if (free) - mt_free_rcu(&node->rcu); + kfree(node); else mt_clear_meta(mt, node, node->type); } @@ -5429,24 +5049,6 @@ static inline void mte_destroy_walk(struct maple_enode *enode, mt_destroy_walk(enode, mt, true); } } - -static void mas_wr_store_setup(struct ma_wr_state *wr_mas) -{ - if (unlikely(mas_is_paused(wr_mas->mas))) - mas_reset(wr_mas->mas); - - if (!mas_is_start(wr_mas->mas)) { - if (mas_is_none(wr_mas->mas)) { - mas_reset(wr_mas->mas); - } else { - wr_mas->r_max = wr_mas->mas->max; - wr_mas->type = mte_node_type(wr_mas->mas->node); - if (mas_is_span_wr(wr_mas)) - mas_reset(wr_mas->mas); - } - } -} - /* Interface */ /** @@ -5455,8 +5057,6 @@ static void mas_wr_store_setup(struct ma_wr_state *wr_mas) * @entry: The entry to store. * * The @mas->index and @mas->last are used to set the range for the @entry. - * Note: The @mas should have pre-allocated entries to ensure there is memory to - * store the entry. Please see mas_expected_entries()/mas_destroy() for more details. * * Return: the first entry between mas->index and mas->last or %NULL. */ @@ -5464,10 +5064,11 @@ void *mas_store(struct ma_state *mas, void *entry) { MA_WR_STATE(wr_mas, mas, entry); - trace_ma_write(__func__, mas, 0, entry); + trace_ma_write(TP_FCT, mas, 0, entry); #ifdef CONFIG_DEBUG_MAPLE_TREE if (MAS_WARN_ON(mas, mas->index > mas->last)) - pr_err("Error %lX > %lX %p\n", mas->index, mas->last, entry); + pr_err("Error %lX > %lX " PTR_FMT "\n", mas->index, mas->last, + entry); if (mas->index > mas->last) { mas_set_err(mas, -EINVAL); @@ -5482,8 +5083,25 @@ void *mas_store(struct ma_state *mas, void *entry) * want to examine what happens if a single store operation was to * overwrite multiple entries within a self-balancing B-Tree.
*/ - mas_wr_store_setup(&wr_mas); + mas_wr_prealloc_setup(&wr_mas); + mas->store_type = mas_wr_store_type(&wr_mas); + if (mas->mas_flags & MA_STATE_PREALLOC) { + mas_wr_store_entry(&wr_mas); + MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas)); + return wr_mas.content; + } + + mas_prealloc_calc(&wr_mas, entry); + if (!mas->node_request) + goto store; + + mas_alloc_nodes(mas, GFP_NOWAIT); + if (mas_is_err(mas)) + return NULL; + +store: mas_wr_store_entry(&wr_mas); + mas_destroy(mas); return wr_mas.content; } EXPORT_SYMBOL_GPL(mas_store); @@ -5499,19 +5117,28 @@ EXPORT_SYMBOL_GPL(mas_store); */ int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp) { + unsigned long index = mas->index; + unsigned long last = mas->last; MA_WR_STATE(wr_mas, mas, entry); + int ret = 0; - mas_wr_store_setup(&wr_mas); - trace_ma_write(__func__, mas, 0, entry); retry: - mas_wr_store_entry(&wr_mas); - if (unlikely(mas_nomem(mas, gfp))) + mas_wr_preallocate(&wr_mas, entry); + if (unlikely(mas_nomem(mas, gfp))) { + if (!entry) + __mas_set_range(mas, index, last); goto retry; + } - if (unlikely(mas_is_err(mas))) - return xa_err(mas->node); + if (mas_is_err(mas)) { + ret = xa_err(mas->node); + goto out; + } - return 0; + mas_wr_store_entry(&wr_mas); +out: + mas_destroy(mas); + return ret; } EXPORT_SYMBOL_GPL(mas_store_gfp); @@ -5525,8 +5152,20 @@ void mas_store_prealloc(struct ma_state *mas, void *entry) { MA_WR_STATE(wr_mas, mas, entry); - mas_wr_store_setup(&wr_mas); - trace_ma_write(__func__, mas, 0, entry); + if (mas->store_type == wr_store_root) { + mas_wr_prealloc_setup(&wr_mas); + goto store; + } + + mas_wr_walk_descend(&wr_mas); + if (mas->store_type != wr_spanning_store) { + /* set wr_mas->content to current slot */ + wr_mas.content = mas_slot_locked(mas, wr_mas.slots, mas->offset); + mas_wr_end_piv(&wr_mas); + } + +store: + trace_ma_write(TP_FCT, mas, 0, entry); mas_wr_store_entry(&wr_mas); MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas)); mas_destroy(mas); @@ -5536,25 +5175,35 @@ EXPORT_SYMBOL_GPL(mas_store_prealloc); /** * mas_preallocate() - Preallocate enough nodes for a store operation * @mas: The maple state + * @entry: The entry that will be stored * @gfp: The GFP_FLAGS to use for allocations. * * Return: 0 on success, -ENOMEM if memory could not be allocated. */ -int mas_preallocate(struct ma_state *mas, gfp_t gfp) +int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp) { - int ret; + MA_WR_STATE(wr_mas, mas, entry); - mas_node_count_gfp(mas, 1 + mas_mt_height(mas) * 3, gfp); - mas->mas_flags |= MA_STATE_PREALLOC; - if (likely(!mas_is_err(mas))) - return 0; + mas_wr_prealloc_setup(&wr_mas); + mas->store_type = mas_wr_store_type(&wr_mas); + mas_prealloc_calc(&wr_mas, entry); + if (!mas->node_request) + goto set_flag; - mas_set_alloc_req(mas, 0); - ret = xa_err(mas->node); - mas_reset(mas); - mas_destroy(mas); - mas_reset(mas); - return ret; + mas->mas_flags &= ~MA_STATE_PREALLOC; + mas_alloc_nodes(mas, gfp); + if (mas_is_err(mas)) { + int ret = xa_err(mas->node); + + mas->node_request = 0; + mas_destroy(mas); + mas_reset(mas); + return ret; + } + +set_flag: + mas->mas_flags |= MA_STATE_PREALLOC; + return 0; } EXPORT_SYMBOL_GPL(mas_preallocate); @@ -5568,118 +5217,62 @@ EXPORT_SYMBOL_GPL(mas_preallocate); */ void mas_destroy(struct ma_state *mas) { - struct maple_alloc *node; - unsigned long total; - - /* - * When using mas_for_each() to insert an expected number of elements, - * it is possible that the number inserted is less than the expected - * number. 
To fix an invalid final node, a check is performed here to - * rebalance the previous node with the final node. - */ - if (mas->mas_flags & MA_STATE_REBALANCE) { - unsigned char end; - - mas_start(mas); - mtree_range_walk(mas); - end = mas_data_end(mas) + 1; - if (end < mt_min_slot_count(mas->node) - 1) - mas_destroy_rebalance(mas, end); - - mas->mas_flags &= ~MA_STATE_REBALANCE; - } - mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC); - - total = mas_allocated(mas); - while (total) { - node = mas->alloc; - mas->alloc = node->slot[0]; - if (node->node_count > 1) { - size_t count = node->node_count - 1; - - mt_free_bulk(count, (void __rcu **)&node->slot[1]); - total -= count; - } - kmem_cache_free(maple_node_cache, node); - total--; - } - - mas->alloc = NULL; + mas->mas_flags &= ~MA_STATE_PREALLOC; + mas_empty_nodes(mas); } EXPORT_SYMBOL_GPL(mas_destroy); -/* - * mas_expected_entries() - Set the expected number of entries that will be inserted. - * @mas: The maple state - * @nr_entries: The number of expected entries. - * - * This will attempt to pre-allocate enough nodes to store the expected number - * of entries. The allocations will occur using the bulk allocator interface - * for speed. Please call mas_destroy() on the @mas after inserting the entries - * to ensure any unused nodes are freed. - * - * Return: 0 on success, -ENOMEM if memory could not be allocated. - */ -int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries) +static void mas_may_activate(struct ma_state *mas) { - int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2; - struct maple_enode *enode = mas->node; - int nr_nodes; - int ret; - - /* - * Sometimes it is necessary to duplicate a tree to a new tree, such as - * forking a process and duplicating the VMAs from one tree to a new - * tree. When such a situation arises, it is known that the new tree is - * not going to be used until the entire tree is populated. For - * performance reasons, it is best to use a bulk load with RCU disabled. - * This allows for optimistic splitting that favours the left and reuse - * of nodes during the operation. - */ - - /* Optimize splitting for bulk insert in-order */ - mas->mas_flags |= MA_STATE_BULK; - - /* - * Avoid overflow, assume a gap between each entry and a trailing null. - * If this is wrong, it just means allocation can happen during - * insertion of entries. 
- */ - nr_nodes = max(nr_entries, nr_entries * 2 + 1); - if (!mt_is_alloc(mas->tree)) - nonleaf_cap = MAPLE_RANGE64_SLOTS - 2; - - /* Leaves; reduce slots to keep space for expansion */ - nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2); - /* Internal nodes */ - nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap); - /* Add working room for split (2 nodes) + new parents */ - mas_node_count(mas, nr_nodes + 3); - - /* Detect if allocations run out */ - mas->mas_flags |= MA_STATE_PREALLOC; - - if (!mas_is_err(mas)) - return 0; - - ret = xa_err(mas->node); - mas->node = enode; - mas_destroy(mas); - return ret; - + if (!mas->node) { + mas->status = ma_start; + } else if (mas->index > mas->max || mas->index < mas->min) { + mas->status = ma_start; + } else { + mas->status = ma_active; + } } -EXPORT_SYMBOL_GPL(mas_expected_entries); -static inline bool mas_next_setup(struct ma_state *mas, unsigned long max, +static bool mas_next_setup(struct ma_state *mas, unsigned long max, void **entry) { bool was_none = mas_is_none(mas); - if (mas_is_none(mas) || mas_is_paused(mas)) - mas->node = MAS_START; + if (unlikely(mas->last >= max)) { + mas->status = ma_overflow; + return true; + } - if (mas_is_start(mas)) - *entry = mas_walk(mas); /* Retries on dead nodes handled by mas_walk */ + switch (mas->status) { + case ma_active: + return false; + case ma_none: + fallthrough; + case ma_pause: + mas->status = ma_start; + fallthrough; + case ma_start: + mas_walk(mas); /* Retries on dead nodes handled by mas_walk */ + break; + case ma_overflow: + /* Overflowed before, but the max changed */ + mas_may_activate(mas); + break; + case ma_underflow: + /* The user expects the mas to be one before where it is */ + mas_may_activate(mas); + *entry = mas_walk(mas); + if (*entry) + return true; + break; + case ma_root: + break; + case ma_error: + return true; + } + + if (likely(mas_is_active(mas))) /* Fast path */ + return false; if (mas_is_ptr(mas)) { *entry = NULL; @@ -5689,12 +5282,13 @@ static inline bool mas_next_setup(struct ma_state *mas, unsigned long max, } mas->index = 1; mas->last = ULONG_MAX; - mas->node = MAS_NONE; + mas->status = ma_none; return true; } if (mas_is_none(mas)) return true; + return false; } @@ -5750,7 +5344,11 @@ EXPORT_SYMBOL_GPL(mas_next_range); * @index: The start index * @max: The maximum index to check * - * Return: The entry at @index or higher, or %NULL if nothing is found. + * Takes RCU read lock internally to protect the search, which does not + * protect the returned pointer after dropping RCU read lock. + * See also: Documentation/core-api/maple_tree.rst + * + * Return: The entry higher than @index or %NULL if nothing is found. 
*/ void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max) { @@ -5764,24 +5362,48 @@ void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max) } EXPORT_SYMBOL_GPL(mt_next); -static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min, - void **entry) +static bool mas_prev_setup(struct ma_state *mas, unsigned long min, void **entry) { - if (mas->index <= min) - goto none; + if (unlikely(mas->index <= min)) { + mas->status = ma_underflow; + return true; + } - if (mas_is_none(mas) || mas_is_paused(mas)) - mas->node = MAS_START; + switch (mas->status) { + case ma_active: + return false; + case ma_start: + break; + case ma_none: + fallthrough; + case ma_pause: + mas->status = ma_start; + break; + case ma_underflow: + /* underflowed before but the min changed */ + mas_may_activate(mas); + break; + case ma_overflow: + /* User expects mas to be one after where it is */ + mas_may_activate(mas); + *entry = mas_walk(mas); + if (*entry) + return true; + break; + case ma_root: + break; + case ma_error: + return true; + } - if (mas_is_start(mas)) { + if (mas_is_start(mas)) mas_walk(mas); - if (!mas->index) - goto none; - } if (unlikely(mas_is_ptr(mas))) { - if (!mas->index) - goto none; + if (!mas->index) { + mas->status = ma_none; + return true; + } mas->index = mas->last = 0; *entry = mas_root(mas); return true; @@ -5791,7 +5413,7 @@ static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min, if (mas->index) { /* Walked to out-of-range pointer? */ mas->index = mas->last = 0; - mas->node = MAS_ROOT; + mas->status = ma_root; *entry = mas_root(mas); return true; } @@ -5799,10 +5421,6 @@ static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min, } return false; - -none: - mas->node = MAS_NONE; - return true; } /** @@ -5811,7 +5429,7 @@ none: * @min: The minimum value to check. * * Must hold rcu_read_lock or the write lock. - * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not + * Will reset mas to ma_start if the status is ma_none. Will stop on not * searchable nodes. * * Return: the previous value or %NULL. @@ -5834,7 +5452,7 @@ EXPORT_SYMBOL_GPL(mas_prev); * * Sets @mas->index and @mas->last to the range. * Must hold rcu_read_lock or the write lock. - * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not + * Will reset mas to ma_start if the status is ma_none. Will stop on not * searchable nodes. * * Return: the previous value or %NULL. @@ -5856,7 +5474,11 @@ EXPORT_SYMBOL_GPL(mas_prev_range); * @index: The start index * @min: The minimum index to check * - * Return: The entry at @index or lower, or %NULL if nothing is found. + * Takes RCU read lock internally to protect the search, which does not + * protect the returned pointer after dropping RCU read lock. + * See also: Documentation/core-api/maple_tree.rst + * + * Return: The entry before @index or %NULL if nothing is found. */ void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min) { @@ -5885,7 +5507,8 @@ EXPORT_SYMBOL_GPL(mt_prev); */ void mas_pause(struct ma_state *mas) { - mas->node = MAS_PAUSE; + mas->status = ma_pause; + mas->node = NULL; } EXPORT_SYMBOL_GPL(mas_pause); @@ -5897,27 +5520,57 @@ EXPORT_SYMBOL_GPL(mas_pause); * * Returns: True if entry is the answer, false otherwise.
*/ -static inline bool mas_find_setup(struct ma_state *mas, unsigned long max, - void **entry) +static __always_inline bool mas_find_setup(struct ma_state *mas, unsigned long max, void **entry) { - *entry = NULL; + switch (mas->status) { + case ma_active: + if (mas->last < max) + return false; + return true; + case ma_start: + break; + case ma_pause: + if (unlikely(mas->last >= max)) + return true; - if (unlikely(mas_is_none(mas))) { + mas->index = ++mas->last; + mas->status = ma_start; + break; + case ma_none: if (unlikely(mas->last >= max)) return true; mas->index = mas->last; - mas->node = MAS_START; - } else if (unlikely(mas_is_paused(mas))) { + mas->status = ma_start; + break; + case ma_underflow: + /* mas is pointing at entry before unable to go lower */ + if (unlikely(mas->index >= max)) { + mas->status = ma_overflow; + return true; + } + + mas_may_activate(mas); + *entry = mas_walk(mas); + if (*entry) + return true; + break; + case ma_overflow: if (unlikely(mas->last >= max)) return true; - mas->node = MAS_START; - mas->index = ++mas->last; - } else if (unlikely(mas_is_ptr(mas))) - goto ptr_out_of_range; + mas_may_activate(mas); + *entry = mas_walk(mas); + if (*entry) + return true; + break; + case ma_root: + break; + case ma_error: + return true; + } - if (unlikely(mas_is_start(mas))) { + if (mas_is_start(mas)) { /* First run or continue */ if (mas->index > max) return true; @@ -5928,12 +5581,11 @@ static inline bool mas_find_setup(struct ma_state *mas, unsigned long max, } - if (unlikely(!mas_searchable(mas))) { - if (unlikely(mas_is_ptr(mas))) - goto ptr_out_of_range; + if (unlikely(mas_is_ptr(mas))) + goto ptr_out_of_range; + if (unlikely(mas_is_none(mas))) return true; - } if (mas->index == max) return true; @@ -5941,7 +5593,7 @@ static inline bool mas_find_setup(struct ma_state *mas, unsigned long max, return false; ptr_out_of_range: - mas->node = MAS_NONE; + mas->status = ma_none; mas->index = 1; mas->last = ULONG_MAX; return true; @@ -5955,7 +5607,7 @@ ptr_out_of_range: * * Must hold rcu_read_lock or the write lock. * If an entry exists, last and index are updated accordingly. - * May set @mas->node to MAS_NONE. + * May set @mas->status to ma_overflow. * * Return: The entry or %NULL. */ @@ -5967,7 +5619,10 @@ void *mas_find(struct ma_state *mas, unsigned long max) return entry; /* Retries on dead nodes handled by mas_next_slot */ - return mas_next_slot(mas, max, false); + entry = mas_next_slot(mas, max, false); + /* Ignore overflow */ + mas->status = ma_active; + return entry; } EXPORT_SYMBOL_GPL(mas_find); @@ -5979,13 +5634,13 @@ EXPORT_SYMBOL_GPL(mas_find); * * Must hold rcu_read_lock or the write lock. * If an entry exists, last and index are updated accordingly. - * May set @mas->node to MAS_NONE. + * May set @mas->status to ma_overflow. * * Return: The entry or %NULL. */ void *mas_find_range(struct ma_state *mas, unsigned long max) { - void *entry; + void *entry = NULL; if (mas_find_setup(mas, max, &entry)) return entry; @@ -6003,29 +5658,51 @@ EXPORT_SYMBOL_GPL(mas_find_range); * * Returns: True if entry is the answer, false otherwise. 
*/ -static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min, +static bool mas_find_rev_setup(struct ma_state *mas, unsigned long min, void **entry) { - *entry = NULL; - if (unlikely(mas_is_none(mas))) { + switch (mas->status) { + case ma_active: + goto active; + case ma_start: + break; + case ma_pause: + if (unlikely(mas->index <= min)) { + mas->status = ma_underflow; + return true; + } + mas->last = --mas->index; + mas->status = ma_start; + break; + case ma_none: if (mas->index <= min) goto none; mas->last = mas->index; - mas->node = MAS_START; - } - - if (unlikely(mas_is_paused(mas))) { + mas->status = ma_start; + break; + case ma_overflow: /* user expects the mas to be one after where it is */ if (unlikely(mas->index <= min)) { - mas->node = MAS_NONE; + mas->status = ma_underflow; return true; } - mas->node = MAS_START; - mas->last = --mas->index; + + mas->status = ma_active; + break; + case ma_underflow: /* user expects the mas to be one before where it is */ + if (unlikely(mas->index <= min)) + return true; + + mas->status = ma_active; + break; + case ma_root: + break; + case ma_error: + return true; } - if (unlikely(mas_is_start(mas))) { + if (mas_is_start(mas)) { /* First run or continue */ if (mas->index < min) return true; @@ -6035,29 +5712,28 @@ static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min, return true; } - if (unlikely(!mas_searchable(mas))) { - if (mas_is_ptr(mas)) - goto none; + if (unlikely(mas_is_ptr(mas))) + goto none; - if (mas_is_none(mas)) { - /* - * Walked to the location, and there was nothing so the - * previous location is 0. - */ - mas->last = mas->index = 0; - mas->node = MAS_ROOT; - *entry = mas_root(mas); - return true; - } + if (unlikely(mas_is_none(mas))) { + /* + * Walked to the location, and there was nothing so the previous + * location is 0. + */ + mas->last = mas->index = 0; + mas->status = ma_root; + *entry = mas_root(mas); + return true; } +active: if (mas->index < min) return true; return false; none: - mas->node = MAS_NONE; + mas->status = ma_none; return true; } @@ -6070,13 +5746,13 @@ none: * * Must hold rcu_read_lock or the write lock. * If an entry exists, last and index are updated accordingly. - * May set @mas->node to MAS_NONE. + * May set @mas->status to ma_underflow. * * Return: The entry or %NULL. */ void *mas_find_rev(struct ma_state *mas, unsigned long min) { - void *entry; + void *entry = NULL; if (mas_find_rev_setup(mas, min, &entry)) return entry; @@ -6096,13 +5772,13 @@ EXPORT_SYMBOL_GPL(mas_find_rev); * * Must hold rcu_read_lock or the write lock. * If an entry exists, last and index are updated accordingly. - * May set @mas->node to MAS_NONE. + * May set @mas->status to ma_underflow. * * Return: The entry or %NULL. */ void *mas_find_range_rev(struct ma_state *mas, unsigned long min) { - void *entry; + void *entry = NULL; if (mas_find_rev_setup(mas, min, &entry)) return entry; @@ -6126,24 +5802,32 @@ EXPORT_SYMBOL_GPL(mas_find_range_rev); void *mas_erase(struct ma_state *mas) { void *entry; + unsigned long index = mas->index; MA_WR_STATE(wr_mas, mas, NULL); - if (mas_is_none(mas) || mas_is_paused(mas)) - mas->node = MAS_START; + if (!mas_is_active(mas) || !mas_is_start(mas)) + mas->status = ma_start; - /* Retry unnecessary when holding the write lock. 
*/ +write_retry: entry = mas_state_walk(mas); if (!entry) return NULL; -write_retry: /* Must reset to ensure spanning writes of last slot are detected */ mas_reset(mas); - mas_wr_store_setup(&wr_mas); - mas_wr_store_entry(&wr_mas); - if (mas_nomem(mas, GFP_KERNEL)) + mas_wr_preallocate(&wr_mas, NULL); + if (mas_nomem(mas, GFP_KERNEL)) { + /* in case the range of entry changed when unlocked */ + mas->index = mas->last = index; goto write_retry; + } + if (mas_is_err(mas)) + goto out; + + mas_wr_store_entry(&wr_mas); +out: + mas_destroy(mas); return entry; } EXPORT_SYMBOL_GPL(mas_erase); @@ -6158,10 +5842,8 @@ EXPORT_SYMBOL_GPL(mas_erase); bool mas_nomem(struct ma_state *mas, gfp_t gfp) __must_hold(mas->tree->ma_lock) { - if (likely(mas->node != MA_ERROR(-ENOMEM))) { - mas_destroy(mas); + if (likely(mas->node != MA_ERROR(-ENOMEM))) return false; - } if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) { mtree_unlock(mas->tree); @@ -6171,18 +5853,23 @@ bool mas_nomem(struct ma_state *mas, gfp_t gfp) mas_alloc_nodes(mas, gfp); } - if (!mas_allocated(mas)) + if (!mas->sheaf && !mas->alloc) return false; - mas->node = MAS_START; + mas->status = ma_start; return true; } void __init maple_tree_init(void) { + struct kmem_cache_args args = { + .align = sizeof(struct maple_node), + .sheaf_capacity = 32, + }; + maple_node_cache = kmem_cache_create("maple_node", - sizeof(struct maple_node), sizeof(struct maple_node), - SLAB_PANIC, NULL); + sizeof(struct maple_node), &args, + SLAB_PANIC); } /** @@ -6197,7 +5884,7 @@ void *mtree_load(struct maple_tree *mt, unsigned long index) MA_STATE(mas, mt, index, index); void *entry; - trace_ma_read(__func__, &mas); + trace_ma_read(TP_FCT, &mas); rcu_read_lock(); retry: entry = mas_start(&mas); @@ -6238,9 +5925,9 @@ int mtree_store_range(struct maple_tree *mt, unsigned long index, unsigned long last, void *entry, gfp_t gfp) { MA_STATE(mas, mt, index, last); - MA_WR_STATE(wr_mas, &mas, entry); + int ret = 0; - trace_ma_write(__func__, &mas, 0, entry); + trace_ma_write(TP_FCT, &mas, 0, entry); if (WARN_ON_ONCE(xa_is_advanced(entry))) return -EINVAL; @@ -6248,16 +5935,10 @@ int mtree_store_range(struct maple_tree *mt, unsigned long index, return -EINVAL; mtree_lock(mt); -retry: - mas_wr_store_entry(&wr_mas); - if (mas_nomem(&mas, gfp)) - goto retry; - + ret = mas_store_gfp(&mas, entry, gfp); mtree_unlock(mt); - if (mas_is_err(&mas)) - return xa_err(mas.node); - return 0; + return ret; } EXPORT_SYMBOL(mtree_store_range); @@ -6279,7 +5960,7 @@ int mtree_store(struct maple_tree *mt, unsigned long index, void *entry, EXPORT_SYMBOL(mtree_store); /** - * mtree_insert_range() - Insert an entry at a give range if there is no value. + * mtree_insert_range() - Insert an entry at a given range if there is no value. * @mt: The maple tree * @first: The start of the range * @last: The end of the range @@ -6293,6 +5974,7 @@ int mtree_insert_range(struct maple_tree *mt, unsigned long first, unsigned long last, void *entry, gfp_t gfp) { MA_STATE(ms, mt, first, last); + int ret = 0; if (WARN_ON_ONCE(xa_is_advanced(entry))) return -EINVAL; @@ -6308,18 +5990,19 @@ retry: mtree_unlock(mt); if (mas_is_err(&ms)) - return xa_err(ms.node); + ret = xa_err(ms.node); - return 0; + mas_destroy(&ms); + return ret; } EXPORT_SYMBOL(mtree_insert_range); /** - * mtree_insert() - Insert an entry at a give index if there is no value. + * mtree_insert() - Insert an entry at a given index if there is no value. 
* @mt: The maple tree * @index: The index to store the value * @entry: The entry to store - * @gfp: The FGP_FLAGS to use for allocations. + * @gfp: The GFP_FLAGS to use for allocations. * * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid * request, -ENOMEM if memory could not be allocated. @@ -6365,10 +6048,54 @@ retry: unlock: mtree_unlock(mt); + mas_destroy(&mas); return ret; } EXPORT_SYMBOL(mtree_alloc_range); +/** + * mtree_alloc_cyclic() - Find somewhere to store this entry in the tree. + * @mt: The maple tree. + * @startp: Pointer to ID. + * @range_lo: Lower bound of range to search. + * @range_hi: Upper bound of range to search. + * @entry: The entry to store. + * @next: Pointer to next ID to allocate. + * @gfp: The GFP_FLAGS to use for allocations. + * + * Finds an empty entry in @mt after @next, stores the new index into + * the @startp pointer, stores the entry at that index, then updates @next. + * + * @mt must be initialized with the MT_FLAGS_ALLOC_RANGE flag. + * + * Context: Any context. Takes and releases the mt.lock. May sleep if + * the @gfp flags permit. + * + * Return: 0 if the allocation succeeded without wrapping, 1 if the + * allocation succeeded after wrapping, -ENOMEM if memory could not be + * allocated, -EINVAL if @mt cannot be used, or -EBUSY if there are no + * free entries. + */ +int mtree_alloc_cyclic(struct maple_tree *mt, unsigned long *startp, + void *entry, unsigned long range_lo, unsigned long range_hi, + unsigned long *next, gfp_t gfp) +{ + int ret; + + MA_STATE(mas, mt, 0, 0); + + if (!mt_is_alloc(mt)) + return -EINVAL; + if (WARN_ON_ONCE(mt_is_reserved(entry))) + return -EINVAL; + mtree_lock(mt); + ret = mas_alloc_cyclic(&mas, startp, entry, range_lo, range_hi, + next, gfp); + mtree_unlock(mt); + return ret; +} +EXPORT_SYMBOL(mtree_alloc_cyclic); + int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp, void *entry, unsigned long size, unsigned long min, unsigned long max, gfp_t gfp) @@ -6403,6 +6130,7 @@ retry: unlock: mtree_unlock(mt); + mas_destroy(&mas); return ret; } EXPORT_SYMBOL(mtree_alloc_rrange); @@ -6422,7 +6150,7 @@ void *mtree_erase(struct maple_tree *mt, unsigned long index) void *entry = NULL; MA_STATE(mas, mt, index, index); - trace_ma_op(__func__, &mas); + trace_ma_op(TP_FCT, &mas); mtree_lock(mt); entry = mas_erase(&mas); @@ -6432,6 +6160,277 @@ void *mtree_erase(struct maple_tree *mt, unsigned long index) } EXPORT_SYMBOL(mtree_erase); +/* + * mas_dup_free() - Free an incomplete duplication of a tree. + * @mas: The maple state of an incomplete tree. + * + * The parameter @mas->node passed in indicates that the allocation failed on + * this node. This function frees all nodes starting from @mas->node in the + * reverse order of mas_dup_build(). There is no need to hold the source tree + * lock at this time. + */ +static void mas_dup_free(struct ma_state *mas) +{ + struct maple_node *node; + enum maple_type type; + void __rcu **slots; + unsigned char count, i; + + /* Maybe the first node allocation failed.
*/ + if (mas_is_none(mas)) + return; + + while (!mte_is_root(mas->node)) { + mas_ascend(mas); + if (mas->offset) { + mas->offset--; + do { + mas_descend(mas); + mas->offset = mas_data_end(mas); + } while (!mte_is_leaf(mas->node)); + + mas_ascend(mas); + } + + node = mte_to_node(mas->node); + type = mte_node_type(mas->node); + slots = ma_slots(node, type); + count = mas_data_end(mas) + 1; + for (i = 0; i < count; i++) + ((unsigned long *)slots)[i] &= ~MAPLE_NODE_MASK; + mt_free_bulk(count, slots); + } + + node = mte_to_node(mas->node); + kfree(node); +} + +/* + * mas_copy_node() - Copy a maple node and replace the parent. + * @mas: The maple state of source tree. + * @new_mas: The maple state of new tree. + * @parent: The parent of the new node. + * + * Copy @mas->node to @new_mas->node, set @parent to be the parent of + * @new_mas->node. If memory allocation fails, @mas is set to -ENOMEM. + */ +static inline void mas_copy_node(struct ma_state *mas, struct ma_state *new_mas, + struct maple_pnode *parent) +{ + struct maple_node *node = mte_to_node(mas->node); + struct maple_node *new_node = mte_to_node(new_mas->node); + unsigned long val; + + /* Copy the node completely. */ + memcpy(new_node, node, sizeof(struct maple_node)); + /* Update the parent node pointer. */ + val = (unsigned long)node->parent & MAPLE_NODE_MASK; + new_node->parent = ma_parent_ptr(val | (unsigned long)parent); +} + +/* + * mas_dup_alloc() - Allocate child nodes for a maple node. + * @mas: The maple state of source tree. + * @new_mas: The maple state of new tree. + * @gfp: The GFP_FLAGS to use for allocations. + * + * This function allocates child nodes for @new_mas->node during the duplication + * process. If memory allocation fails, @mas is set to -ENOMEM. + */ +static inline void mas_dup_alloc(struct ma_state *mas, struct ma_state *new_mas, + gfp_t gfp) +{ + struct maple_node *node = mte_to_node(mas->node); + struct maple_node *new_node = mte_to_node(new_mas->node); + enum maple_type type; + unsigned char count, i; + void __rcu **slots; + void __rcu **new_slots; + unsigned long val; + + /* Allocate memory for child nodes. */ + type = mte_node_type(mas->node); + new_slots = ma_slots(new_node, type); + count = mas->node_request = mas_data_end(mas) + 1; + mas_alloc_nodes(mas, gfp); + if (unlikely(mas_is_err(mas))) + return; + + slots = ma_slots(node, type); + for (i = 0; i < count; i++) { + val = (unsigned long)mt_slot_locked(mas->tree, slots, i); + val &= MAPLE_NODE_MASK; + new_slots[i] = ma_mnode_ptr((unsigned long)mas_pop_node(mas) | + val); + } +} + +/* + * mas_dup_build() - Build a new maple tree from a source tree + * @mas: The maple state of the source tree; must be in the ma_start state. + * @new_mas: The maple state of the new tree; must be in the ma_start state. + * @gfp: The GFP_FLAGS to use for allocations. + * + * This function builds a new tree in DFS preorder. If the memory allocation + * fails, the error code -ENOMEM will be set in @mas, and @new_mas points to the + * last node. mas_dup_free() will free the incomplete duplication of a tree. + * + * Note that the attributes of the two trees need to be exactly the same, and the + * new tree needs to be empty, otherwise -EINVAL will be set in @mas.
+ */ +static inline void mas_dup_build(struct ma_state *mas, struct ma_state *new_mas, + gfp_t gfp) +{ + struct maple_node *node; + struct maple_pnode *parent = NULL; + struct maple_enode *root; + enum maple_type type; + + if (unlikely(mt_attr(mas->tree) != mt_attr(new_mas->tree)) || + unlikely(!mtree_empty(new_mas->tree))) { + mas_set_err(mas, -EINVAL); + return; + } + + root = mas_start(mas); + if (mas_is_ptr(mas) || mas_is_none(mas)) + goto set_new_tree; + + node = mt_alloc_one(gfp); + if (!node) { + new_mas->status = ma_none; + mas_set_err(mas, -ENOMEM); + return; + } + + type = mte_node_type(mas->node); + root = mt_mk_node(node, type); + new_mas->node = root; + new_mas->min = 0; + new_mas->max = ULONG_MAX; + root = mte_mk_root(root); + while (1) { + mas_copy_node(mas, new_mas, parent); + if (!mte_is_leaf(mas->node)) { + /* Only allocate child nodes for non-leaf nodes. */ + mas_dup_alloc(mas, new_mas, gfp); + if (unlikely(mas_is_err(mas))) + goto empty_mas; + } else { + /* + * This is the last leaf node and duplication is + * completed. + */ + if (mas->max == ULONG_MAX) + goto done; + + /* This is not the last leaf node and needs to go up. */ + do { + mas_ascend(mas); + mas_ascend(new_mas); + } while (mas->offset == mas_data_end(mas)); + + /* Move to the next subtree. */ + mas->offset++; + new_mas->offset++; + } + + mas_descend(mas); + parent = ma_parent_ptr(mte_to_node(new_mas->node)); + mas_descend(new_mas); + mas->offset = 0; + new_mas->offset = 0; + } +done: + /* Specially handle the parent of the root node. */ + mte_to_node(root)->parent = ma_parent_ptr(mas_tree_parent(new_mas)); +set_new_tree: + /* Make them the same height */ + new_mas->tree->ma_flags = mas->tree->ma_flags; + rcu_assign_pointer(new_mas->tree->ma_root, root); +empty_mas: + mas_empty_nodes(mas); +} + +/** + * __mt_dup(): Duplicate an entire maple tree + * @mt: The source maple tree + * @new: The new maple tree + * @gfp: The GFP_FLAGS to use for allocations + * + * This function duplicates a maple tree in Depth-First Search (DFS) pre-order + * traversal. It uses memcpy() to copy nodes in the source tree and allocate + * new child nodes in non-leaf nodes. The new node is exactly the same as the + * source node except for all the addresses stored in it. It will be faster than + * traversing all elements in the source tree and inserting them one by one into + * the new tree. + * The user needs to ensure that the attributes of the source tree and the new + * tree are the same, and the new tree needs to be an empty tree, otherwise + * -EINVAL will be returned. + * Note that the user needs to manually lock the source tree and the new tree. + * + * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL if + * the attributes of the two trees are different or the new tree is not an empty + * tree. + */ +int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp) +{ + int ret = 0; + MA_STATE(mas, mt, 0, 0); + MA_STATE(new_mas, new, 0, 0); + + mas_dup_build(&mas, &new_mas, gfp); + if (unlikely(mas_is_err(&mas))) { + ret = xa_err(mas.node); + if (ret == -ENOMEM) + mas_dup_free(&new_mas); + } + + return ret; +} +EXPORT_SYMBOL(__mt_dup); + +/** + * mtree_dup(): Duplicate an entire maple tree + * @mt: The source maple tree + * @new: The new maple tree + * @gfp: The GFP_FLAGS to use for allocations + * + * This function duplicates a maple tree in Depth-First Search (DFS) pre-order + * traversal. It uses memcpy() to copy nodes in the source tree and allocate + * new child nodes in non-leaf nodes.
The new node is exactly the same as the + * source node except for all the addresses stored in it. It will be faster than + * traversing all elements in the source tree and inserting them one by one into + * the new tree. + * The user needs to ensure that the attributes of the source tree and the new + * tree are the same, and the new tree needs to be an empty tree, otherwise + * -EINVAL will be returned. + * + * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL if + * the attributes of the two trees are different or the new tree is not an empty + * tree. + */ +int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp) +{ + int ret = 0; + MA_STATE(mas, mt, 0, 0); + MA_STATE(new_mas, new, 0, 0); + + mas_lock(&new_mas); + mas_lock_nested(&mas, SINGLE_DEPTH_NESTING); + mas_dup_build(&mas, &new_mas, gfp); + mas_unlock(&mas); + if (unlikely(mas_is_err(&mas))) { + ret = xa_err(mas.node); + if (ret == -ENOMEM) + mas_dup_free(&new_mas); + } + + mas_unlock(&new_mas); + return ret; +} +EXPORT_SYMBOL(mtree_dup); + /** * __mt_destroy() - Walk and free all nodes of a locked maple tree. * @mt: The maple tree @@ -6446,7 +6445,7 @@ void __mt_destroy(struct maple_tree *mt) if (xa_is_node(root)) mte_destroy_walk(root, mt); - mt->ma_flags = 0; + mt->ma_flags = mt_attr(mt); } EXPORT_SYMBOL_GPL(__mt_destroy); @@ -6468,9 +6467,15 @@ EXPORT_SYMBOL(mtree_destroy); * mt_find() - Search from the start up until an entry is found. * @mt: The maple tree * @index: Pointer which contains the start location of the search - * @max: The maximum value to check + * @max: The maximum value of the search range * - * Handles locking. @index will be incremented to one beyond the range. + * Takes RCU read lock internally to protect the search, which does not + * protect the returned pointer after dropping RCU read lock. + * See also: Documentation/core-api/maple_tree.rst + * + * In case that an entry is found @index is updated to point to the next + * possible entry, independent of whether the found entry is occupying a + * single index or a range of indices. * * Return: The entry at or after the @index or %NULL */ @@ -6482,7 +6487,7 @@ void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max) unsigned long copy = *index; #endif - trace_ma_read(__func__, &mas); + trace_ma_read(TP_FCT, &mas); if ((*index) > max) return NULL; @@ -6499,8 +6504,8 @@ retry: if (entry) goto unlock; - while (mas_searchable(&mas) && (mas.last < max)) { - entry = mas_next_entry(&mas, max); + while (mas_is_active(&mas) && (mas.last < max)) { + entry = mas_next_slot(&mas, max, false); if (likely(entry && !xa_is_zero(entry))) break; } @@ -6528,7 +6533,9 @@ EXPORT_SYMBOL(mt_find); * @index: Pointer which contains the start location of the search * @max: The maximum value to check * - * Handles locking, detects wrapping on index == 0 + * Same as mt_find() except that it checks @index for 0 before + * searching. If @index == 0, the search is aborted. This covers a wrap + * around of @index to 0 in an iterator loop.
* * Return: The entry at or after the @index or %NULL */ @@ -6555,6 +6562,19 @@ void mt_set_non_kernel(unsigned int val) kmem_cache_set_non_kernel(maple_node_cache, val); } +extern void kmem_cache_set_callback(struct kmem_cache *cachep, + void (*callback)(void *)); +void mt_set_callback(void (*callback)(void *)) +{ + kmem_cache_set_callback(maple_node_cache, callback); +} + +extern void kmem_cache_set_private(struct kmem_cache *cachep, void *private); +void mt_set_private(void *private) +{ + kmem_cache_set_private(maple_node_cache, private); +} + extern unsigned long kmem_cache_get_alloc(struct kmem_cache *); unsigned long mt_get_alloc_size(void) { @@ -6579,26 +6599,6 @@ unsigned int mt_nr_allocated(void) return kmem_cache_nr_allocated(maple_node_cache); } -/* - * mas_dead_node() - Check if the maple state is pointing to a dead node. - * @mas: The maple state - * @index: The index to restore in @mas. - * - * Used in test code. - * Return: 1 if @mas has been reset to MAS_START, 0 otherwise. - */ -static inline int mas_dead_node(struct ma_state *mas, unsigned long index) -{ - if (unlikely(!mas_searchable(mas) || mas_is_start(mas))) - return 0; - - if (likely(!mte_dead_node(mas->node))) - return 0; - - mas_rewalk(mas, index); - return 1; -} - void mt_cache_shrink(void) { } @@ -6633,87 +6633,15 @@ static inline struct maple_enode *mas_get_slot(struct ma_state *mas, offset); } - -/* - * mas_first_entry() - Go the first leaf and find the first entry. - * @mas: the maple state. - * @limit: the maximum index to check. - * @*r_start: Pointer to set to the range start. - * - * Sets mas->offset to the offset of the entry, r_start to the range minimum. - * - * Return: The first entry or MAS_NONE. - */ -static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn, - unsigned long limit, enum maple_type mt) - -{ - unsigned long max; - unsigned long *pivots; - void __rcu **slots; - void *entry = NULL; - - mas->index = mas->min; - if (mas->index > limit) - goto none; - - max = mas->max; - mas->offset = 0; - while (likely(!ma_is_leaf(mt))) { - MAS_WARN_ON(mas, mte_dead_node(mas->node)); - slots = ma_slots(mn, mt); - entry = mas_slot(mas, slots, 0); - pivots = ma_pivots(mn, mt); - if (unlikely(ma_dead_node(mn))) - return NULL; - max = pivots[0]; - mas->node = entry; - mn = mas_mn(mas); - mt = mte_node_type(mas->node); - } - MAS_WARN_ON(mas, mte_dead_node(mas->node)); - - mas->max = max; - slots = ma_slots(mn, mt); - entry = mas_slot(mas, slots, 0); - if (unlikely(ma_dead_node(mn))) - return NULL; - - /* Slot 0 or 1 must be set */ - if (mas->index > limit) - goto none; - - if (likely(entry)) - return entry; - - mas->offset = 1; - entry = mas_slot(mas, slots, 1); - pivots = ma_pivots(mn, mt); - if (unlikely(ma_dead_node(mn))) - return NULL; - - mas->index = pivots[0] + 1; - if (mas->index > limit) - goto none; - - if (likely(entry)) - return entry; - -none: - if (likely(!ma_dead_node(mn))) - mas->node = MAS_NONE; - return NULL; -} - /* Depth first search, post-order */ static void mas_dfs_postorder(struct ma_state *mas, unsigned long max) { - struct maple_enode *p = MAS_NONE, *mn = mas->node; + struct maple_enode *p, *mn = mas->node; unsigned long p_min, p_max; mas_next_node(mas, mas_mn(mas), max); - if (!mas_is_none(mas)) + if (!mas_is_overflow(mas)) return; if (mte_is_root(mn)) @@ -6726,7 +6654,7 @@ static void mas_dfs_postorder(struct ma_state *mas, unsigned long max) p_min = mas->min; p_max = mas->max; mas_prev_node(mas, 0); - } while (!mas_is_none(mas)); + } while 
(!mas_is_underflow(mas)); mas->node = p; mas->max = p_max; @@ -6749,7 +6677,6 @@ static void mt_dump_range(unsigned long min, unsigned long max, else pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max); break; - default: case mt_dump_dec: if (min == max) pr_info("%.*s%lu: ", depth * 2, spaces, min); @@ -6764,14 +6691,14 @@ static void mt_dump_entry(void *entry, unsigned long min, unsigned long max, mt_dump_range(min, max, depth, format); if (xa_is_value(entry)) - pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry), - xa_to_value(entry), entry); + pr_cont("value %ld (0x%lx) [" PTR_FMT "]\n", xa_to_value(entry), + xa_to_value(entry), entry); else if (xa_is_zero(entry)) pr_cont("zero (%ld)\n", xa_to_internal(entry)); else if (mt_is_reserved(entry)) - pr_cont("UNKNOWN ENTRY (%p)\n", entry); + pr_cont("UNKNOWN ENTRY (" PTR_FMT ")\n", entry); else - pr_cont("%p\n", entry); + pr_cont(PTR_FMT "\n", entry); } static void mt_dump_range64(const struct maple_tree *mt, void *entry, @@ -6787,14 +6714,13 @@ static void mt_dump_range64(const struct maple_tree *mt, void *entry, for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) { switch(format) { case mt_dump_hex: - pr_cont("%p %lX ", node->slot[i], node->pivot[i]); + pr_cont(PTR_FMT " %lX ", node->slot[i], node->pivot[i]); break; - default: case mt_dump_dec: - pr_cont("%p %lu ", node->slot[i], node->pivot[i]); + pr_cont(PTR_FMT " %lu ", node->slot[i], node->pivot[i]); } } - pr_cont("%p\n", node->slot[i]); + pr_cont(PTR_FMT "\n", node->slot[i]); for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) { unsigned long last = max; @@ -6816,12 +6742,11 @@ static void mt_dump_range64(const struct maple_tree *mt, void *entry, if (last > max) { switch(format) { case mt_dump_hex: - pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n", + pr_err("node " PTR_FMT " last (%lx) > max (%lx) at pivot %d!\n", node, last, max, i); break; - default: case mt_dump_dec: - pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n", + pr_err("node " PTR_FMT " last (%lu) > max (%lu) at pivot %d!\n", node, last, max, i); } } @@ -6834,17 +6759,30 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry, enum mt_dump_format format) { struct maple_arange_64 *node = &mte_to_node(entry)->ma64; - bool leaf = mte_is_leaf(entry); unsigned long first = min; int i; pr_cont(" contents: "); - for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) - pr_cont("%lu ", node->gap[i]); + for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) { + switch (format) { + case mt_dump_hex: + pr_cont("%lx ", node->gap[i]); + break; + case mt_dump_dec: + pr_cont("%lu ", node->gap[i]); + } + } pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap); - for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++) - pr_cont("%p %lu ", node->slot[i], node->pivot[i]); - pr_cont("%p\n", node->slot[i]); + for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++) { + switch (format) { + case mt_dump_hex: + pr_cont(PTR_FMT " %lX ", node->slot[i], node->pivot[i]); + break; + case mt_dump_dec: + pr_cont(PTR_FMT " %lu ", node->slot[i], node->pivot[i]); + } + } + pr_cont(PTR_FMT "\n", node->slot[i]); for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) { unsigned long last = max; @@ -6854,19 +6792,22 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry, break; if (last == 0 && i > 0) break; - if (leaf) - mt_dump_entry(mt_slot(mt, node->slot, i), - first, last, depth + 1, format); - else if (node->slot[i]) + if (node->slot[i]) mt_dump_node(mt, mt_slot(mt, node->slot, i), first, last, depth + 1, format); if (last == max) break; if (last > max) { - pr_err("node 
%p last (%lu) > max (%lu) at pivot %d!\n", + switch(format) { + case mt_dump_hex: + pr_err("node " PTR_FMT " last (%lx) > max (%lx) at pivot %d!\n", node, last, max, i); - break; + break; + case mt_dump_dec: + pr_err("node " PTR_FMT " last (%lu) > max (%lu) at pivot %d!\n", + node, last, max, i); + } } first = last + 1; } @@ -6882,8 +6823,8 @@ static void mt_dump_node(const struct maple_tree *mt, void *entry, mt_dump_range(min, max, depth, format); - pr_cont("node %p depth %d type %d parent %p", node, depth, type, - node ? node->parent : NULL); + pr_cont("node " PTR_FMT " depth %d type %d parent " PTR_FMT, node, + depth, type, node ? node->parent : NULL); switch (type) { case maple_dense: pr_cont("\n"); @@ -6911,12 +6852,14 @@ void mt_dump(const struct maple_tree *mt, enum mt_dump_format format) { void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt)); - pr_info("maple_tree(%p) flags %X, height %u root %p\n", + pr_info("maple_tree(" PTR_FMT ") flags %X, height %u root " PTR_FMT "\n", mt, mt->ma_flags, mt_height(mt), entry); - if (!xa_is_node(entry)) - mt_dump_entry(entry, 0, 0, 0, format); - else if (entry) + if (xa_is_node(entry)) mt_dump_node(mt, entry, 0, mt_node_max(entry), 0, format); + else if (entry) + mt_dump_entry(entry, 0, 0, 0, format); + else + pr_info("(empty)\n"); } EXPORT_SYMBOL_GPL(mt_dump); @@ -6927,15 +6870,16 @@ EXPORT_SYMBOL_GPL(mt_dump); static void mas_validate_gaps(struct ma_state *mas) { struct maple_enode *mte = mas->node; - struct maple_node *p_mn; + struct maple_node *p_mn, *node = mte_to_node(mte); + enum maple_type mt = mte_node_type(mas->node); unsigned long gap = 0, max_gap = 0; unsigned long p_end, p_start = mas->min; - unsigned char p_slot; + unsigned char p_slot, offset; unsigned long *gaps = NULL; - unsigned long *pivots = ma_pivots(mte_to_node(mte), mte_node_type(mte)); - int i; + unsigned long *pivots = ma_pivots(node, mt); + unsigned int i; - if (ma_is_dense(mte_node_type(mte))) { + if (ma_is_dense(mt)) { for (i = 0; i < mt_slot_count(mte); i++) { if (mas_get_slot(mas, i)) { if (gap > max_gap) @@ -6948,52 +6892,59 @@ static void mas_validate_gaps(struct ma_state *mas) goto counted; } - gaps = ma_gaps(mte_to_node(mte), mte_node_type(mte)); + gaps = ma_gaps(node, mt); for (i = 0; i < mt_slot_count(mte); i++) { - p_end = mas_logical_pivot(mas, pivots, i, mte_node_type(mte)); + p_end = mas_safe_pivot(mas, pivots, i, mt); if (!gaps) { - if (mas_get_slot(mas, i)) { - gap = 0; - goto not_empty; - } - - gap += p_end - p_start + 1; + if (!mas_get_slot(mas, i)) + gap = p_end - p_start + 1; } else { void *entry = mas_get_slot(mas, i); gap = gaps[i]; - if (!entry) { - if (gap != p_end - p_start + 1) { - pr_err("%p[%u] -> %p %lu != %lu - %lu + 1\n", - mas_mn(mas), i, - mas_get_slot(mas, i), gap, - p_end, p_start); - mt_dump(mas->tree, mt_dump_hex); - - MT_BUG_ON(mas->tree, - gap != p_end - p_start + 1); - } - } else { - if (gap > p_end - p_start + 1) { - pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n", - mas_mn(mas), i, gap, p_end, p_start, - p_end - p_start + 1); - MT_BUG_ON(mas->tree, - gap > p_end - p_start + 1); - } + MT_BUG_ON(mas->tree, !entry); + + if (gap > p_end - p_start + 1) { + pr_err(PTR_FMT "[%u] %lu >= %lu - %lu + 1 (%lu)\n", + mas_mn(mas), i, gap, p_end, p_start, + p_end - p_start + 1); + MT_BUG_ON(mas->tree, gap > p_end - p_start + 1); } } if (gap > max_gap) max_gap = gap; -not_empty: + p_start = p_end + 1; if (p_end >= mas->max) break; } counted: + if (mt == maple_arange_64) { + MT_BUG_ON(mas->tree, !gaps); + offset = ma_meta_gap(node); + if 
(offset > i) { + pr_err("gap offset " PTR_FMT "[%u] is invalid\n", node, offset); + MT_BUG_ON(mas->tree, 1); + } + + if (gaps[offset] != max_gap) { + pr_err("gap " PTR_FMT "[%u] is not the largest gap %lu\n", + node, offset, max_gap); + MT_BUG_ON(mas->tree, 1); + } + + for (i++ ; i < mt_slot_count(mte); i++) { + if (gaps[i] != 0) { + pr_err("gap " PTR_FMT "[%u] beyond node limit != 0\n", + node, i); + MT_BUG_ON(mas->tree, 1); + } + } + } + if (mte_is_root(mte)) return; @@ -7001,12 +6952,10 @@ counted: p_mn = mte_parent(mte); MT_BUG_ON(mas->tree, max_gap > mas->max); if (ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap) { - pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap); + pr_err("gap " PTR_FMT "[%u] != %lu\n", p_mn, p_slot, max_gap); mt_dump(mas->tree, mt_dump_hex); + MT_BUG_ON(mas->tree, 1); } - - MT_BUG_ON(mas->tree, - ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap); } static void mas_validate_parent_slot(struct ma_state *mas) @@ -7033,11 +6982,11 @@ static void mas_validate_parent_slot(struct ma_state *mas) node = mas_slot(mas, slots, i); if (i == p_slot) { if (node != mas->node) - pr_err("parent %p[%u] does not have %p\n", + pr_err("parent " PTR_FMT "[%u] does not have " PTR_FMT "\n", parent, i, mas_mn(mas)); MT_BUG_ON(mas->tree, node != mas->node); } else if (node == mas->node) { - pr_err("Invalid child %p at parent %p[%u] p_slot %u\n", + pr_err("Invalid child " PTR_FMT " at parent " PTR_FMT "[%u] p_slot %u\n", mas_mn(mas), parent, i, p_slot); MT_BUG_ON(mas->tree, node == mas->node); } @@ -7057,30 +7006,36 @@ static void mas_validate_child_slot(struct ma_state *mas) for (i = 0; i < mt_slots[type]; i++) { child = mas_slot(mas, slots, i); - if (!pivots[i] || pivots[i] == mas->max) - break; - if (!child) - break; + if (!child) { + pr_err("Non-leaf node lacks child at " PTR_FMT "[%u]\n", + mas_mn(mas), i); + MT_BUG_ON(mas->tree, 1); + } if (mte_parent_slot(child) != i) { - pr_err("Slot error at %p[%u]: child %p has pslot %u\n", + pr_err("Slot error at " PTR_FMT "[%u]: child " PTR_FMT " has pslot %u\n", mas_mn(mas), i, mte_to_node(child), mte_parent_slot(child)); MT_BUG_ON(mas->tree, 1); } if (mte_parent(child) != mte_to_node(mas->node)) { - pr_err("child %p has parent %p not %p\n", + pr_err("child " PTR_FMT " has parent " PTR_FMT " not " PTR_FMT "\n", mte_to_node(child), mte_parent(child), mte_to_node(mas->node)); MT_BUG_ON(mas->tree, 1); } + + if (i < mt_pivots[type] && pivots[i] == mas->max) + break; } } /* - * Validate all pivots are within mas->min and mas->max. + * Validate all pivots are within mas->min and mas->max, check metadata ends + * where the maximum ends and ensure there are no slots or pivots set outside of + * the end of the data. */ static void mas_validate_limits(struct ma_state *mas) { @@ -7090,41 +7045,30 @@ static void mas_validate_limits(struct ma_state *mas) void __rcu **slots = ma_slots(mte_to_node(mas->node), type); unsigned long *pivots = ma_pivots(mas_mn(mas), type); - /* all limits are fine here.
*/ - if (mte_is_root(mas->node)) - return; - for (i = 0; i < mt_slots[type]; i++) { unsigned long piv; piv = mas_safe_pivot(mas, pivots, i, type); - if (!piv && (i != 0)) - break; - - if (!mte_is_leaf(mas->node)) { - void *entry = mas_slot(mas, slots, i); - - if (!entry) - pr_err("%p[%u] cannot be null\n", - mas_mn(mas), i); - - MT_BUG_ON(mas->tree, !entry); + if (!piv && (i != 0)) { + pr_err("Missing node limit pivot at " PTR_FMT "[%u]", + mas_mn(mas), i); + MAS_WARN_ON(mas, 1); } if (prev_piv > piv) { - pr_err("%p[%u] piv %lu < prev_piv %lu\n", + pr_err(PTR_FMT "[%u] piv %lu < prev_piv %lu\n", mas_mn(mas), i, piv, prev_piv); MAS_WARN_ON(mas, piv < prev_piv); } if (piv < mas->min) { - pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i, + pr_err(PTR_FMT "[%u] %lu < %lu\n", mas_mn(mas), i, piv, mas->min); MAS_WARN_ON(mas, piv < mas->min); } if (piv > mas->max) { - pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i, + pr_err(PTR_FMT "[%u] %lu > %lu\n", mas_mn(mas), i, piv, mas->max); MAS_WARN_ON(mas, piv > mas->max); } @@ -7132,12 +7076,19 @@ static void mas_validate_limits(struct ma_state *mas) if (piv == mas->max) break; } + + if (mas_data_end(mas) != i) { + pr_err("node" PTR_FMT ": data_end %u != the last slot offset %u\n", + mas_mn(mas), mas_data_end(mas), i); + MT_BUG_ON(mas->tree, 1); + } + for (i += 1; i < mt_slots[type]; i++) { void *entry = mas_slot(mas, slots, i); if (entry && (i != mt_slots[type] - 1)) { - pr_err("%p[%u] should not have entry %p\n", mas_mn(mas), - i, entry); + pr_err(PTR_FMT "[%u] should not have entry " PTR_FMT "\n", + mas_mn(mas), i, entry); MT_BUG_ON(mas->tree, entry != NULL); } @@ -7147,7 +7098,7 @@ static void mas_validate_limits(struct ma_state *mas) if (!piv) continue; - pr_err("%p[%u] should not have piv %lu\n", + pr_err(PTR_FMT "[%u] should not have piv %lu\n", mas_mn(mas), i, piv); MAS_WARN_ON(mas, i < mt_pivots[type] - 1); } @@ -7162,7 +7113,7 @@ static void mt_validate_nulls(struct maple_tree *mt) MA_STATE(mas, mt, 0, 0); mas_start(&mas); - if (mas_is_none(&mas) || (mas.node == MAS_ROOT)) + if (mas_is_none(&mas) || (mas_is_ptr(&mas))) return; while (!mte_is_leaf(mas.node)) @@ -7172,14 +7123,14 @@ static void mt_validate_nulls(struct maple_tree *mt) do { entry = mas_slot(&mas, slots, offset); if (!last && !entry) { - pr_err("Sequential nulls end at %p[%u]\n", + pr_err("Sequential nulls end at " PTR_FMT "[%u]\n", mas_mn(&mas), offset); } MT_BUG_ON(mt, !last && !entry); last = entry; if (offset == mas_data_end(&mas)) { mas_next_node(&mas, mas_mn(&mas), ULONG_MAX); - if (mas_is_none(&mas)) + if (mas_is_overflow(&mas)) return; offset = 0; slots = ma_slots(mte_to_node(mas.node), @@ -7188,7 +7139,7 @@ static void mt_validate_nulls(struct maple_tree *mt) offset++; } - } while (!mas_is_none(&mas)); + } while (!mas_is_overflow(&mas)); } /* @@ -7197,56 +7148,108 @@ static void mt_validate_nulls(struct maple_tree *mt) * 2. 
The gap is correctly set in the parents */ void mt_validate(struct maple_tree *mt) + __must_hold(mas->tree->ma_lock) { unsigned char end; MA_STATE(mas, mt, 0, 0); - rcu_read_lock(); mas_start(&mas); - if (!mas_searchable(&mas)) - goto done; + if (!mas_is_active(&mas)) + return; - mas_first_entry(&mas, mas_mn(&mas), ULONG_MAX, mte_node_type(mas.node)); - while (!mas_is_none(&mas)) { + while (!mte_is_leaf(mas.node)) + mas_descend(&mas); + + while (!mas_is_overflow(&mas)) { MAS_WARN_ON(&mas, mte_dead_node(mas.node)); - if (!mte_is_root(mas.node)) { - end = mas_data_end(&mas); - if (MAS_WARN_ON(&mas, - (end < mt_min_slot_count(mas.node)) && - (mas.max != ULONG_MAX))) { - pr_err("Invalid size %u of %p\n", end, - mas_mn(&mas)); - } + end = mas_data_end(&mas); + if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) && + (!mte_is_root(mas.node)))) { + pr_err("Invalid size %u of " PTR_FMT "\n", + end, mas_mn(&mas)); } + mas_validate_parent_slot(&mas); - mas_validate_child_slot(&mas); mas_validate_limits(&mas); + mas_validate_child_slot(&mas); if (mt_is_alloc(mt)) mas_validate_gaps(&mas); mas_dfs_postorder(&mas, ULONG_MAX); } mt_validate_nulls(mt); -done: - rcu_read_unlock(); - } EXPORT_SYMBOL_GPL(mt_validate); void mas_dump(const struct ma_state *mas) { - pr_err("MAS: tree=%p enode=%p ", mas->tree, mas->node); - if (mas_is_none(mas)) - pr_err("(MAS_NONE) "); - else if (mas_is_ptr(mas)) - pr_err("(MAS_ROOT) "); - else if (mas_is_start(mas)) - pr_err("(MAS_START) "); - else if (mas_is_paused(mas)) - pr_err("(MAS_PAUSED) "); - - pr_err("[%u] index=%lx last=%lx\n", mas->offset, mas->index, mas->last); - pr_err(" min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n", - mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags); + pr_err("MAS: tree=" PTR_FMT " enode=" PTR_FMT " ", + mas->tree, mas->node); + switch (mas->status) { + case ma_active: + pr_err("(ma_active)"); + break; + case ma_none: + pr_err("(ma_none)"); + break; + case ma_root: + pr_err("(ma_root)"); + break; + case ma_start: + pr_err("(ma_start) "); + break; + case ma_pause: + pr_err("(ma_pause) "); + break; + case ma_overflow: + pr_err("(ma_overflow) "); + break; + case ma_underflow: + pr_err("(ma_underflow) "); + break; + case ma_error: + pr_err("(ma_error) "); + break; + } + + pr_err("Store Type: "); + switch (mas->store_type) { + case wr_invalid: + pr_err("invalid store type\n"); + break; + case wr_new_root: + pr_err("new_root\n"); + break; + case wr_store_root: + pr_err("store_root\n"); + break; + case wr_exact_fit: + pr_err("exact_fit\n"); + break; + case wr_split_store: + pr_err("split_store\n"); + break; + case wr_slot_store: + pr_err("slot_store\n"); + break; + case wr_append: + pr_err("append\n"); + break; + case wr_node_store: + pr_err("node_store\n"); + break; + case wr_spanning_store: + pr_err("spanning_store\n"); + break; + case wr_rebalance: + pr_err("rebalance\n"); + break; + } + + pr_err("[%u/%u] index=%lx last=%lx\n", mas->offset, mas->end, + mas->index, mas->last); + pr_err(" min=%lx max=%lx sheaf=" PTR_FMT ", request %lu depth=%u, flags=%x\n", + mas->min, mas->max, mas->sheaf, mas->node_request, mas->depth, + mas->mas_flags); if (mas->index > mas->last) pr_err("Check index & last\n"); } @@ -7254,10 +7257,10 @@ EXPORT_SYMBOL_GPL(mas_dump); void mas_wr_dump(const struct ma_wr_state *wr_mas) { - pr_err("WR_MAS: node=%p r_min=%lx r_max=%lx\n", + pr_err("WR_MAS: node=" PTR_FMT " r_min=%lx r_max=%lx\n", wr_mas->node, wr_mas->r_min, wr_mas->r_max); pr_err(" type=%u off_end=%u, node_end=%u, end_piv=%lx\n", - wr_mas->type, 
wr_mas->offset_end, wr_mas->node_end,
+	       wr_mas->type, wr_mas->offset_end, wr_mas->mas->end,
 	       wr_mas->end_piv);
 }
 EXPORT_SYMBOL_GPL(mas_wr_dump);
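
A minimal sketch of how the reworked validator might be driven from test code; this is an illustration under stated assumptions, not part of the diff above. mt_validate() now carries a __must_hold() annotation and drops its internal rcu_read_lock()/rcu_read_unlock() pair, so the caller is expected to hold the tree lock across the call. DEFINE_MTREE(), mtree_store_range(), mtree_lock() and xa_mk_value() are the existing public API; check_validate() is a hypothetical helper name.

/*
 * Hypothetical test helper (assumed; mt_validate() is only available
 * with CONFIG_DEBUG_MAPLE_TREE): populate a tree, then run the
 * validator with the tree lock held, as the new
 * __must_hold(mas->tree->ma_lock) annotation implies.
 */
#include <linux/maple_tree.h>
#include <linux/xarray.h>	/* xa_mk_value() */

static DEFINE_MTREE(test_mt);

static void check_validate(void)
{
	/* Store a couple of ranges so every validator pass has work to do. */
	mtree_store_range(&test_mt, 10, 20, xa_mk_value(1), GFP_KERNEL);
	mtree_store_range(&test_mt, 40, 60, xa_mk_value(2), GFP_KERNEL);

	mtree_lock(&test_mt);	/* the validator asserts ma_lock is held */
	mt_validate(&test_mt);
	mtree_unlock(&test_mt);
}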
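
The expanded mas_dump() above now reports the state-machine status (ma_active, ma_overflow, ...), the chosen store type, the offset/end pair, and the sheaf allocation state. A hypothetical debug snippet of the kind it is meant for, again an assumed usage rather than anything in this diff:

/*
 * Hypothetical debug snippet (requires CONFIG_DEBUG_MAPLE_TREE for
 * mas_dump()): dump the maple state when a lookup unexpectedly misses.
 */
MA_STATE(mas, &test_mt, 15, 15);
void *entry;

rcu_read_lock();
entry = mas_walk(&mas);	/* walk to index 15, settling mas on a leaf */
if (!entry)
	mas_dump(&mas);	/* prints status, store type, [offset/end], sheaf */
rcu_read_unlock();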
