Diffstat (limited to 'mm/kasan/kasan_test_c.c')
-rw-r--r--	mm/kasan/kasan_test_c.c	118
1 file changed, 117 insertions(+), 1 deletion(-)
diff --git a/mm/kasan/kasan_test_c.c b/mm/kasan/kasan_test_c.c
index d8fb281e439d..e0ec5a6d15be 100644
--- a/mm/kasan/kasan_test_c.c
+++ b/mm/kasan/kasan_test_c.c
@@ -33,6 +33,8 @@
#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+
static bool multishot;
/* Fields set based on lines observed in the console. */
@@ -213,6 +215,36 @@ static void kmalloc_node_oob_right(struct kunit *test)
kfree(ptr);
}
+static void kmalloc_track_caller_oob_right(struct kunit *test)
+{
+ char *ptr;
+ size_t size = 128 - KASAN_GRANULE_SIZE;
+
+ /*
+ * Check that KASAN detects an out-of-bounds access for an object
+ * allocated via kmalloc_track_caller().
+ */
+ ptr = kmalloc_track_caller(size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+ OPTIMIZER_HIDE_VAR(ptr);
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'y');
+
+ kfree(ptr);
+
+ /*
+ * Check that KASAN detects an out-of-bounds access for an object
+ * allocated via kmalloc_node_track_caller().
+ */
+ ptr = kmalloc_node_track_caller(size, GFP_KERNEL, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+ OPTIMIZER_HIDE_VAR(ptr);
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'y');
+
+ kfree(ptr);
+}
+
/*
* Check that KASAN detects an out-of-bounds access for a big object allocated
* via kmalloc(). But not as big as to trigger the page_alloc fallback.
@@ -1928,10 +1960,92 @@ static void rust_uaf(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, kasan_test_rust_uaf());
}
+static void copy_to_kernel_nofault_oob(struct kunit *test)
+{
+ char *ptr;
+ char buf[128];
+ size_t size = sizeof(buf);
+
+ /*
+ * This test currently fails with the HW_TAGS mode. The reason is
+ * unknown and needs to be investigated.
+ */
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);
+
+ ptr = kmalloc(size - KASAN_GRANULE_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ OPTIMIZER_HIDE_VAR(ptr);
+
+ /*
+ * We test copy_to_kernel_nofault() to detect corrupted memory that is
+ * being written into the kernel. In contrast,
+ * copy_from_kernel_nofault() is primarily used in kernel helper
+ * functions where the source address might be random or uninitialized.
+ * Applying KASAN instrumentation to copy_from_kernel_nofault() could
+ * lead to false positives. By focusing KASAN checks only on
+ * copy_to_kernel_nofault(), we ensure that only valid memory is
+ * written to the kernel, minimizing the risk of kernel corruption
+ * while avoiding false positives in the reverse case.
+ */
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ copy_to_kernel_nofault(&buf[0], ptr, size));
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ copy_to_kernel_nofault(ptr, &buf[0], size));
+
+ kfree(ptr);
+}
+
+static void copy_user_test_oob(struct kunit *test)
+{
+ char *kmem;
+ char __user *usermem;
+ unsigned long useraddr;
+ size_t size = 128 - KASAN_GRANULE_SIZE;
+ int __maybe_unused unused;
+
+ kmem = kunit_kmalloc(test, size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, kmem);
+
+ useraddr = kunit_vm_mmap(test, NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ KUNIT_ASSERT_NE_MSG(test, useraddr, 0,
+ "Could not create userspace mm");
+ KUNIT_ASSERT_LT_MSG(test, useraddr, (unsigned long)TASK_SIZE,
+ "Failed to allocate user memory");
+
+ OPTIMIZER_HIDE_VAR(size);
+ usermem = (char __user *)useraddr;
+
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ unused = copy_from_user(kmem, usermem, size + 1));
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ unused = copy_to_user(usermem, kmem, size + 1));
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ unused = __copy_from_user(kmem, usermem, size + 1));
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ unused = __copy_to_user(usermem, kmem, size + 1));
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ unused = __copy_from_user_inatomic(kmem, usermem, size + 1));
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ unused = __copy_to_user_inatomic(usermem, kmem, size + 1));
+
+ /*
+ * Prepare a long string in usermem to avoid the strncpy_from_user test
+ * bailing out on '\0' before it reaches the out-of-bounds access.
+ */
+ memset(kmem, 'a', size);
+ KUNIT_EXPECT_EQ(test, copy_to_user(usermem, kmem, size), 0);
+
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ unused = strncpy_from_user(kmem, usermem, size + 1));
+}
+
static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kmalloc_oob_right),
KUNIT_CASE(kmalloc_oob_left),
KUNIT_CASE(kmalloc_node_oob_right),
+ KUNIT_CASE(kmalloc_track_caller_oob_right),
KUNIT_CASE(kmalloc_big_oob_right),
KUNIT_CASE(kmalloc_large_oob_right),
KUNIT_CASE(kmalloc_large_uaf),
@@ -1992,7 +2106,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kasan_strings),
KUNIT_CASE(kasan_bitops_generic),
KUNIT_CASE(kasan_bitops_tags),
- KUNIT_CASE(kasan_atomics),
+ KUNIT_CASE_SLOW(kasan_atomics),
KUNIT_CASE(vmalloc_helpers_tags),
KUNIT_CASE(vmalloc_oob),
KUNIT_CASE(vmap_tags),
@@ -2000,7 +2114,9 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(match_all_not_assigned),
KUNIT_CASE(match_all_ptr_tag),
KUNIT_CASE(match_all_mem_tag),
+ KUNIT_CASE(copy_to_kernel_nofault_oob),
KUNIT_CASE(rust_uaf),
+ KUNIT_CASE(copy_user_test_oob),
{}
};
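
For readers unfamiliar with the harness, every new case above follows the same KASAN KUnit pattern: allocate an object slightly smaller than a granule-aligned size, hide the pointer from the optimizer so the bad access cannot be elided, and assert that the access produces a KASAN report. A minimal sketch of that pattern follows; the function name kmalloc_oob_write_sketch is illustrative only and is not part of the patch.

static void kmalloc_oob_write_sketch(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	/* The allocation is granule-short, so ptr[size] falls past the object. */
	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Keep the compiler from optimizing the out-of-bounds write away. */
	OPTIMIZER_HIDE_VAR(ptr);

	/* The case passes only if the expression triggers a KASAN report. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');

	kfree(ptr);
}

Such a case would then be registered in kasan_kunit_test_cases[] via KUNIT_CASE(), exactly as the hunks above do for kmalloc_track_caller_oob_right, copy_to_kernel_nofault_oob, and copy_user_test_oob.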