Diffstat (limited to 'lib/slub_kunit.c')
-rw-r--r--	lib/slub_kunit.c	83
1 file changed, 81 insertions, 2 deletions
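Two API points are worth flagging before the diff. First, the test's cache-backed allocation moves from kmalloc_trace() to alloc_hooks(__kmalloc_cache_noprof(...)), in line with the memory allocation profiling rework: the slab entry points gained _noprof variants, and callers wrap them in alloc_hooks() so the call site is recorded as the allocation owner. The public allocators are built from the same wrapper; as a simplified sketch of the pattern (the real macros in include/linux/slab.h and include/linux/alloc_tag.h carry more bookkeeping):

	/*
	 * Simplified sketch of the _noprof/alloc_hooks() split: the _noprof
	 * variant performs the actual allocation, while alloc_hooks() layers
	 * per-callsite accounting on top when CONFIG_MEM_ALLOC_PROFILING=y.
	 */
	#define kmalloc(...)	alloc_hooks(kmalloc_noprof(__VA_ARGS__))

Second, the new test_kfree_rcu case needs struct rcu_head and kfree_rcu(), hence the added #include <linux/rcupdate.h>.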
diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
index d4a3730b08fa..f11691315c2f 100644
--- a/lib/slub_kunit.c
+++ b/lib/slub_kunit.c
@@ -5,6 +5,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/rcupdate.h>
 #include "../mm/slab.h"
 
 static struct kunit_resource resource;
@@ -55,7 +56,7 @@ static void test_next_pointer(struct kunit *test)
 
 	ptr_addr = (unsigned long *)(p + s->offset);
 	tmp = *ptr_addr;
-	p[s->offset] = 0x12;
+	p[s->offset] = ~p[s->offset];
 
 	/*
 	 * Expecting three errors.
@@ -140,7 +141,7 @@ static void test_kmalloc_redzone_access(struct kunit *test)
 {
 	struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_kmalloc", 32,
 				SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);
-	u8 *p = kmalloc_trace(s, GFP_KERNEL, 18);
+	u8 *p = alloc_hooks(__kmalloc_cache_noprof(s, GFP_KERNEL, 18));
 
 	kasan_disable_current();
@@ -157,6 +158,81 @@ static void test_kmalloc_redzone_access(struct kunit *test)
 	kmem_cache_destroy(s);
 }
 
+struct test_kfree_rcu_struct {
+	struct rcu_head rcu;
+};
+
+static void test_kfree_rcu(struct kunit *test)
+{
+	struct kmem_cache *s;
+	struct test_kfree_rcu_struct *p;
+
+	if (IS_BUILTIN(CONFIG_SLUB_KUNIT_TEST))
+		kunit_skip(test, "can't do kfree_rcu() when test is built-in");
+
+	s = test_kmem_cache_create("TestSlub_kfree_rcu",
+				   sizeof(struct test_kfree_rcu_struct),
+				   SLAB_NO_MERGE);
+	p = kmem_cache_alloc(s, GFP_KERNEL);
+
+	kfree_rcu(p, rcu);
+	kmem_cache_destroy(s);
+
+	KUNIT_EXPECT_EQ(test, 0, slab_errors);
+}
+
+static void test_leak_destroy(struct kunit *test)
+{
+	struct kmem_cache *s = test_kmem_cache_create("TestSlub_leak_destroy",
+							64, SLAB_NO_MERGE);
+	kmem_cache_alloc(s, GFP_KERNEL);
+
+	kmem_cache_destroy(s);
+
+	KUNIT_EXPECT_EQ(test, 2, slab_errors);
+}
+
+static void test_krealloc_redzone_zeroing(struct kunit *test)
+{
+	u8 *p;
+	int i;
+	struct kmem_cache *s = test_kmem_cache_create("TestSlub_krealloc", 64,
+				SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);
+
+	p = alloc_hooks(__kmalloc_cache_noprof(s, GFP_KERNEL, 48));
+	memset(p, 0xff, 48);
+
+	kasan_disable_current();
+	OPTIMIZER_HIDE_VAR(p);
+
+	/* Test shrink */
+	p = krealloc(p, 40, GFP_KERNEL | __GFP_ZERO);
+	for (i = 40; i < 64; i++)
+		KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);
+
+	/* Test grow within the same 64B kmalloc object */
+	p = krealloc(p, 56, GFP_KERNEL | __GFP_ZERO);
+	for (i = 40; i < 56; i++)
+		KUNIT_EXPECT_EQ(test, p[i], 0);
+	for (i = 56; i < 64; i++)
+		KUNIT_EXPECT_EQ(test, p[i], SLUB_RED_ACTIVE);
+
+	validate_slab_cache(s);
+	KUNIT_EXPECT_EQ(test, 0, slab_errors);
+
+	memset(p, 0xff, 56);
+	/* Test grow with allocating a bigger 128B object */
+	p = krealloc(p, 112, GFP_KERNEL | __GFP_ZERO);
+	for (i = 0; i < 56; i++)
+		KUNIT_EXPECT_EQ(test, p[i], 0xff);
+	for (i = 56; i < 112; i++)
+		KUNIT_EXPECT_EQ(test, p[i], 0);
+
+	kfree(p);
+	kasan_enable_current();
+	kmem_cache_destroy(s);
+}
+
 static int test_init(struct kunit *test)
 {
 	slab_errors = 0;
@@ -177,6 +253,9 @@ static struct kunit_case test_cases[] = {
 	KUNIT_CASE(test_clobber_redzone_free),
 	KUNIT_CASE(test_kmalloc_redzone_access),
+	KUNIT_CASE(test_kfree_rcu),
+	KUNIT_CASE(test_leak_destroy),
+	KUNIT_CASE(test_krealloc_redzone_zeroing),
 	{}
 };
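For context on what test_kfree_rcu exercises: kfree_rcu() takes the object pointer plus the name of its embedded rcu_head field, and frees the object only after an RCU grace period has elapsed. A minimal sketch of the same pattern outside the test (struct and function names here are hypothetical):

	#include <linux/slab.h>
	#include <linux/rcupdate.h>

	/* hypothetical object embedding the rcu_head that kfree_rcu() requires */
	struct foo {
		int payload;
		struct rcu_head rcu;
	};

	static void release_foo(struct foo *f)
	{
		/*
		 * Queue the object for freeing once all pre-existing RCU
		 * read-side critical sections have completed; the embedded
		 * rcu_head provides storage for the deferred-free callback.
		 */
		kfree_rcu(f, rcu);
	}

Note the kunit_skip() guard in the test itself: per its message, the kfree_rcu() case only runs when CONFIG_SLUB_KUNIT_TEST is built as a module. test_leak_destroy, by contrast, destroys the cache while an object is still allocated so that kmem_cache_destroy() reports the leak, and expects exactly two slab_errors.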