author    | Liam R. Howlett <Liam.Howlett@Oracle.com> | 2025-09-03 15:00:02 +0200
committer | Vlastimil Babka <vbabka@suse.cz> | 2025-09-29 09:40:46 +0200
commit    | 6bf377b06c08049d0f4042493df302285e45165e (patch)
tree      | c80922435d2820e075dc1a2ba81b13ef4cddef2c /lib/maple_tree.c
parent    | 9b05890a25d9197e39fcf5b2298f0b911c323306 (diff)
maple_tree: Add single node allocation support to maple state
The fast path through a write requires replacing only a single node in
the tree.  Using a sheaf (32 nodes) is too heavy for the fast path, so
special-case the node store operation by allocating just one node in
the maple state.
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
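
As a usage sketch (not part of the patch), this is roughly how an in-kernel
user drives a preallocated store through the exported maple tree API. The
helper name and range below are made up, locking is omitted, and whether a
given write ends up requesting a single node, and therefore taking the new
fast path, depends on the store itself.

```c
#include <linux/maple_tree.h>
#include <linux/gfp.h>

/* Hypothetical helper: preallocate for one store, then perform it. */
static int store_with_prealloc(struct maple_tree *mt, void *entry)
{
	MA_STATE(mas, mt, 10, 20);	/* write the range [10, 20] */
	int ret;

	/* Estimate the nodes the write needs and allocate them up front. */
	ret = mas_preallocate(&mas, entry, GFP_KERNEL);
	if (ret)
		return ret;

	/* Perform the store using the preallocated nodes. */
	mas_store_prealloc(&mas, entry);
	return 0;
}
```

mas_store_prealloc() releases whatever was preallocated, so an abandoned
preallocation would instead need an explicit mas_destroy().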
Diffstat (limited to 'lib/maple_tree.c')
-rw-r--r-- | lib/maple_tree.c | 47
1 file changed, 41 insertions(+), 6 deletions(-)
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index b41245f2cc65..2b7299409900 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -1073,16 +1073,23 @@ static int mas_ascend(struct ma_state *mas)
  *
  * Return: A pointer to a maple node.
  */
-static inline struct maple_node *mas_pop_node(struct ma_state *mas)
+static __always_inline struct maple_node *mas_pop_node(struct ma_state *mas)
 {
         struct maple_node *ret;
 
+        if (mas->alloc) {
+                ret = mas->alloc;
+                mas->alloc = NULL;
+                goto out;
+        }
+
         if (WARN_ON_ONCE(!mas->sheaf))
                 return NULL;
 
         ret = kmem_cache_alloc_from_sheaf(maple_node_cache, GFP_NOWAIT, mas->sheaf);
-        memset(ret, 0, sizeof(*ret));
 
+out:
+        memset(ret, 0, sizeof(*ret));
         return ret;
 }
 
@@ -1093,9 +1100,34 @@ static inline struct maple_node *mas_pop_node(struct ma_state *mas)
  */
 static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
 {
-        if (unlikely(mas->sheaf)) {
-                unsigned long refill = mas->node_request;
+        if (!mas->node_request)
+                return;
+
+        if (mas->node_request == 1) {
+                if (mas->sheaf)
+                        goto use_sheaf;
+
+                if (mas->alloc)
+                        return;
+                mas->alloc = mt_alloc_one(gfp);
+                if (!mas->alloc)
+                        goto error;
+
+                mas->node_request = 0;
+                return;
+        }
+
+use_sheaf:
+        if (unlikely(mas->alloc)) {
+                kfree(mas->alloc);
+                mas->alloc = NULL;
+        }
+
+        if (mas->sheaf) {
+                unsigned long refill;
+
+                refill = mas->node_request;
 
                 if (kmem_cache_sheaf_size(mas->sheaf) >= refill) {
                         mas->node_request = 0;
                         return;
@@ -5180,8 +5212,11 @@ void mas_destroy(struct ma_state *mas)
         mas->node_request = 0;
         if (mas->sheaf)
                 mt_return_sheaf(mas->sheaf);
-
         mas->sheaf = NULL;
+
+        if (mas->alloc)
+                kfree(mas->alloc);
+        mas->alloc = NULL;
 }
 EXPORT_SYMBOL_GPL(mas_destroy);
 
@@ -5816,7 +5851,7 @@ bool mas_nomem(struct ma_state *mas, gfp_t gfp)
                 mas_alloc_nodes(mas, gfp);
         }
 
-        if (!mas->sheaf)
+        if (!mas->sheaf && !mas->alloc)
                 return false;
 
         mas->status = ma_start;
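
The hunks are easier to follow with the two paths side by side, so here is a
small standalone model of the decision the patch adds. It is plain C, not
kernel code: the sheaf is reduced to a counter, every name is illustrative,
and only the ordering matters, i.e. a request for exactly one node is kept
directly in the state and preferred when popping.

```c
#include <stdio.h>
#include <stdlib.h>

struct node { char pad[256]; };

struct state {
	struct node *alloc;		/* single preallocated node (fast path) */
	unsigned int sheaf;		/* nodes left in a batch (bulk path) */
	unsigned int node_request;	/* nodes the next write will need */
};

/* Serve a request of exactly one node from st->alloc; anything larger
 * (or a state that already holds a batch) goes through the bulk path. */
static int alloc_nodes(struct state *st)
{
	if (!st->node_request)
		return 0;

	if (st->node_request == 1 && !st->sheaf) {
		if (!st->alloc) {
			st->alloc = calloc(1, sizeof(*st->alloc));
			if (!st->alloc)
				return -1;
		}
		st->node_request = 0;
		return 0;
	}

	/* Bulk path: drop any stray single node, pretend a batch of 32
	 * nodes was obtained from the allocator. */
	free(st->alloc);
	st->alloc = NULL;
	st->sheaf = 32;
	st->node_request = 0;
	return 0;
}

/* Popping prefers the single preallocated node over the batch. */
static struct node *pop_node(struct state *st)
{
	struct node *ret = st->alloc;

	if (ret) {
		st->alloc = NULL;
		return ret;
	}
	if (!st->sheaf)
		return NULL;
	st->sheaf--;
	return calloc(1, sizeof(*ret));	/* stand-in for a node from the batch */
}

int main(void)
{
	struct state st = { .node_request = 1 };
	struct node *n;

	if (alloc_nodes(&st))
		return 1;
	n = pop_node(&st);	/* taken from st.alloc, no batch touched */
	printf("fast path used: %s\n", (n && !st.sheaf) ? "yes" : "no");
	free(n);
	return 0;
}
```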