From d6e8d0dc19a3ebea185cd8e99f2e960d81b153ad Mon Sep 17 00:00:00 2001 From: Peng Zhang Date: Wed, 28 Jun 2023 15:36:54 +0800 Subject: maple_tree: add test for mas_wr_modify() fast path Patch series "Optimize the fast path of mas_store()", v4. Add fast paths for mas_wr_append() and mas_wr_slot_store() respectively. The newly added fast path of mas_wr_append() is used in fork() and how much it benefits fork() depends on how many VMAs are duplicated. Thanks Liam for the review. This patch (of 4): Add tests for all cases of mas_wr_append() and mas_wr_slot_store(). Link: https://lkml.kernel.org/r/20230628073657.75314-1-zhangpeng.00@bytedance.com Link: https://lkml.kernel.org/r/20230628073657.75314-2-zhangpeng.00@bytedance.com Signed-off-by: Peng Zhang Reviewed-by: Liam R. Howlett Signed-off-by: Andrew Morton --- lib/test_maple_tree.c | 65 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) (limited to 'lib/test_maple_tree.c') diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c index 8d4c92cbdd0c..3207c2107918 100644 --- a/lib/test_maple_tree.c +++ b/lib/test_maple_tree.c @@ -1157,6 +1157,71 @@ static noinline void __init check_ranges(struct maple_tree *mt) MT_BUG_ON(mt, !mt_height(mt)); mtree_destroy(mt); + /* Check in-place modifications */ + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + /* Append to the start of last range */ + mt_set_non_kernel(50); + for (i = 0; i <= 500; i++) { + val = i * 5 + 1; + val2 = val + 4; + check_store_range(mt, val, val2, xa_mk_value(val), 0); + } + + /* Append to the last range without touching any boundaries */ + for (i = 0; i < 10; i++) { + val = val2 + 5; + val2 = val + 4; + check_store_range(mt, val, val2, xa_mk_value(val), 0); + } + + /* Append to the end of last range */ + val = val2; + for (i = 0; i < 10; i++) { + val += 5; + MT_BUG_ON(mt, mtree_test_store_range(mt, val, ULONG_MAX, + xa_mk_value(val)) != 0); + } + + /* Overwriting the range and over a part of the next range */ + for (i = 10; i < 30; i += 2) { + val = i * 5 + 1; + val2 = val + 5; + check_store_range(mt, val, val2, xa_mk_value(val), 0); + } + + /* Overwriting a part of the range and over the next range */ + for (i = 50; i < 70; i += 2) { + val2 = i * 5; + val = val2 - 5; + check_store_range(mt, val, val2, xa_mk_value(val), 0); + } + + /* + * Expand the range, only partially overwriting the previous and + * next ranges + */ + for (i = 100; i < 130; i += 3) { + val = i * 5 - 5; + val2 = i * 5 + 1; + check_store_range(mt, val, val2, xa_mk_value(val), 0); + } + + /* + * Expand the range, only partially overwriting the previous and + * next ranges, in RCU mode + */ + mt_set_in_rcu(mt); + for (i = 150; i < 180; i += 3) { + val = i * 5 - 5; + val2 = i * 5 + 1; + check_store_range(mt, val, val2, xa_mk_value(val), 0); + } + + MT_BUG_ON(mt, !mt_height(mt)); + mt_validate(mt); + mt_set_non_kernel(0); + mtree_destroy(mt); + /* Test rebalance gaps */ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); mt_set_non_kernel(50); -- cgit