Diffstat (limited to 'mm/kasan')
-rw-r--r--  mm/kasan/Makefile                                             |  13
-rw-r--r--  mm/kasan/common.c                                             |  64
-rw-r--r--  mm/kasan/generic.c                                            |  25
-rw-r--r--  mm/kasan/hw_tags.c                                            |  13
-rw-r--r--  mm/kasan/init.c                                               |  12
-rw-r--r--  mm/kasan/kasan.h                                              |  26
-rw-r--r--  mm/kasan/kasan_test_c.c (renamed from mm/kasan/kasan_test.c)  | 230
-rw-r--r--  mm/kasan/kasan_test_module.c                                  |  81
-rw-r--r--  mm/kasan/kasan_test_rust.rs                                   |  22
-rw-r--r--  mm/kasan/report.c                                             |  40
-rw-r--r--  mm/kasan/shadow.c                                             | 106
-rw-r--r--  mm/kasan/sw_tags.c                                            |   3
12 files changed, 411 insertions, 224 deletions
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
index 7634dd2a6128..dd93ae8a6beb 100644
--- a/mm/kasan/Makefile
+++ b/mm/kasan/Makefile
@@ -35,7 +35,7 @@ CFLAGS_shadow.o := $(CC_FLAGS_KASAN_RUNTIME)
 CFLAGS_hw_tags.o := $(CC_FLAGS_KASAN_RUNTIME)
 CFLAGS_sw_tags.o := $(CC_FLAGS_KASAN_RUNTIME)

-CFLAGS_KASAN_TEST := $(CFLAGS_KASAN) $(call cc-disable-warning, vla)
+CFLAGS_KASAN_TEST := $(CFLAGS_KASAN)
 ifndef CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX
 # If compiler instruments memintrinsics by prefixing them with __asan/__hwasan,
 # we need to treat them normally (as builtins), otherwise the compiler won't
@@ -44,13 +44,18 @@ ifndef CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX
 CFLAGS_KASAN_TEST += -fno-builtin
 endif

-CFLAGS_kasan_test.o := $(CFLAGS_KASAN_TEST)
-CFLAGS_kasan_test_module.o := $(CFLAGS_KASAN_TEST)
+CFLAGS_REMOVE_kasan_test_c.o += $(call cc-option, -Wvla-larger-than=1)
+CFLAGS_kasan_test_c.o := $(CFLAGS_KASAN_TEST)
+RUSTFLAGS_kasan_test_rust.o := $(RUSTFLAGS_KASAN)

 obj-y := common.o report.o
 obj-$(CONFIG_KASAN_GENERIC) += init.o generic.o report_generic.o shadow.o quarantine.o
 obj-$(CONFIG_KASAN_HW_TAGS) += hw_tags.o report_hw_tags.o tags.o report_tags.o
 obj-$(CONFIG_KASAN_SW_TAGS) += init.o report_sw_tags.o shadow.o sw_tags.o tags.o report_tags.o

+kasan_test-objs := kasan_test_c.o
+ifdef CONFIG_RUST
+	kasan_test-objs += kasan_test_rust.o
+endif
+
 obj-$(CONFIG_KASAN_KUNIT_TEST) += kasan_test.o
-obj-$(CONFIG_KASAN_MODULE_TEST) += kasan_test_module.o
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index e7c9a4dc89f8..ed4873e18c75 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -208,15 +208,12 @@ void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
         return (void *)object;
 }

-static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
-                                      unsigned long ip, bool init)
+/* Returns true when freeing the object is not safe. */
+static bool check_slab_allocation(struct kmem_cache *cache, void *object,
+                                  unsigned long ip)
 {
-        void *tagged_object;
-
-        if (!kasan_arch_is_ready())
-                return false;
+        void *tagged_object = object;

-        tagged_object = object;
         object = kasan_reset_tag(object);

         if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) {
@@ -224,37 +221,47 @@ static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
                 return true;
         }

-        /* RCU slabs could be legally used after free within the RCU period. */
-        if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
-                return false;
-
         if (!kasan_byte_accessible(tagged_object)) {
                 kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_DOUBLE_FREE);
                 return true;
         }

+        return false;
+}
+
+static inline void poison_slab_object(struct kmem_cache *cache, void *object,
+                                      bool init, bool still_accessible)
+{
+        void *tagged_object = object;
+
+        object = kasan_reset_tag(object);
+
+        /* RCU slabs could be legally used after free within the RCU period. */
+        if (unlikely(still_accessible))
+                return;
+
         kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
                         KASAN_SLAB_FREE, init);

         if (kasan_stack_collection_enabled())
                 kasan_save_free_info(cache, tagged_object);
+}

-        return false;
+bool __kasan_slab_pre_free(struct kmem_cache *cache, void *object,
+                                unsigned long ip)
+{
+        if (!kasan_arch_is_ready() || is_kfence_address(object))
+                return false;
+        return check_slab_allocation(cache, object, ip);
 }

-bool __kasan_slab_free(struct kmem_cache *cache, void *object,
-                                unsigned long ip, bool init)
+bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,
+                       bool still_accessible)
 {
-        if (is_kfence_address(object))
+        if (!kasan_arch_is_ready() || is_kfence_address(object))
                 return false;

-        /*
-         * If the object is buggy, do not let slab put the object onto the
-         * freelist. The object will thus never be allocated again and its
-         * metadata will never get released.
-         */
-        if (poison_slab_object(cache, object, ip, init))
-                return true;
+        poison_slab_object(cache, object, init, still_accessible);

         /*
          * If the object is put into quarantine, do not let slab put the object
@@ -504,11 +511,16 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
                 return true;
         }

-        if (is_kfence_address(ptr))
-                return false;
+        if (is_kfence_address(ptr) || !kasan_arch_is_ready())
+                return true;

         slab = folio_slab(folio);
-        return !poison_slab_object(slab->slab_cache, ptr, ip, false);
+
+        if (check_slab_allocation(slab->slab_cache, ptr, ip))
+                return false;
+
+        poison_slab_object(slab->slab_cache, ptr, false, false);
+        return true;
 }

 void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
@@ -532,7 +544,7 @@ void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
                 return;

         /* Unpoison the object and save alloc info for non-kmalloc() allocations. */
-        unpoison_slab_object(slab->slab_cache, ptr, size, flags);
+        unpoison_slab_object(slab->slab_cache, ptr, flags, false);

         /* Poison the redzone and save alloc info for kmalloc() allocations. */
         if (is_kmalloc_cache(slab->slab_cache))
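For context: the common.c change splits the old poison_slab_object() into a checking half (check_slab_allocation()) and a poisoning half, exposed to the allocator as __kasan_slab_pre_free() and __kasan_slab_free(). Below is a minimal sketch of how a slab free path might drive the two-phase API; example_free_hook() is illustrative, not the literal SLUB call site.

#include <linux/kasan.h>
#include <linux/slab.h>

/* Illustrative sketch, not the literal SLUB code. */
static bool example_free_hook(struct kmem_cache *s, void *object, bool init)
{
        /*
         * Phase 1: detect invalid/double frees. Returns true when freeing
         * is not safe; the caller then keeps the object off the freelist,
         * so it is never reallocated and its metadata survives.
         */
        if (kasan_slab_pre_free(s, object))
                return false;

        /*
         * Phase 2: poison the object. For SLAB_TYPESAFE_BY_RCU caches the
         * caller passes still_accessible == true and KASAN skips the
         * poisoning, since the object may legally be read until the RCU
         * grace period ends. A true return means KASAN quarantined the
         * object and the caller must not reuse it yet.
         */
        return !kasan_slab_free(s, object, init,
                                s->flags & SLAB_TYPESAFE_BY_RCU);
}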
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 6310a180278b..d54e89f8c3e7 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -392,9 +392,12 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
          * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
          *    be touched after it was freed, or
          * 2. Object has a constructor, which means it's expected to
-         *    retain its content until the next allocation.
+         *    retain its content until the next allocation, or
+         * 3. It is from a kmalloc cache which enables the debug option
+         *    to store original size.
          */
-        if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor) {
+        if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
+            slub_debug_orig_size(cache)) {
                 cache->kasan_info.free_meta_offset = *size;
                 *size += sizeof(struct kasan_free_meta);
                 goto free_meta_added;
@@ -521,7 +524,11 @@ size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
                 sizeof(struct kasan_free_meta) : 0);
 }

-static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
+/*
+ * This function avoids dynamic memory allocations and thus can be called from
+ * contexts that do not allow allocating memory.
+ */
+void kasan_record_aux_stack(void *addr)
 {
         struct slab *slab = kasan_addr_to_slab(addr);
         struct kmem_cache *cache;
@@ -538,17 +545,7 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
                 return;

         alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
-        alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
-}
-
-void kasan_record_aux_stack(void *addr)
-{
-        return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC);
-}
-
-void kasan_record_aux_stack_noalloc(void *addr)
-{
-        return __kasan_record_aux_stack(addr, 0);
+        alloc_meta->aux_stack[0] = kasan_save_stack(0, 0);
 }

 void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
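The generic.c change folds kasan_record_aux_stack_noalloc() into kasan_record_aux_stack() by always passing stack-depot flags 0, so the single remaining entry point never allocates. A hypothetical call site (not from this patch) showing why that matters:

#include <linux/kasan.h>
#include <linux/workqueue.h>

/*
 * Hypothetical call site: because kasan_record_aux_stack() no longer
 * allocates, it may be called with locks held or from other contexts
 * where allocating memory is not allowed, e.g. while queueing work.
 */
static void example_enqueue(struct work_struct *work)
{
        /* Remember who queued the work in the object's aux stack slots. */
        kasan_record_aux_stack(work);
        queue_work(system_wq, work);
}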
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index 2b994092a2d4..9a6927394b54 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -8,6 +8,7 @@

 #define pr_fmt(fmt) "kasan: " fmt

+#include <kunit/visibility.h>
 #include <linux/init.h>
 #include <linux/kasan.h>
 #include <linux/kernel.h>
@@ -15,7 +16,9 @@
 #include <linux/mm.h>
 #include <linux/static_key.h>
 #include <linux/string.h>
+#include <linux/string_choices.h>
 #include <linux/types.h>
+#include <linux/vmalloc.h>

 #include "kasan.h"

@@ -261,8 +264,8 @@ void __init kasan_init_hw_tags(void)
         pr_info("KernelAddressSanitizer initialized (hw-tags, mode=%s, vmalloc=%s, stacktrace=%s)\n",
                 kasan_mode_info(),
-                kasan_vmalloc_enabled() ? "on" : "off",
-                kasan_stack_collection_enabled() ? "on" : "off");
+                str_on_off(kasan_vmalloc_enabled()),
+                str_on_off(kasan_stack_collection_enabled()));
 }

 #ifdef CONFIG_KASAN_VMALLOC
@@ -393,12 +396,12 @@ void kasan_enable_hw_tags(void)

 #if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)

-EXPORT_SYMBOL_GPL(kasan_enable_hw_tags);
+EXPORT_SYMBOL_IF_KUNIT(kasan_enable_hw_tags);

-void kasan_force_async_fault(void)
+VISIBLE_IF_KUNIT void kasan_force_async_fault(void)
 {
         hw_force_async_tag_fault();
 }
-EXPORT_SYMBOL_GPL(kasan_force_async_fault);
+EXPORT_SYMBOL_IF_KUNIT(kasan_force_async_fault);

 #endif
diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index 89895f38f722..ced6b29fcf76 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -126,8 +126,10 @@ static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,

                 if (slab_is_available())
                         p = pte_alloc_one_kernel(&init_mm);
-                else
+                else {
                         p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
+                        kernel_pte_init(p);
+                }
                 if (!p)
                         return -ENOMEM;

@@ -139,10 +141,6 @@ static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
         return 0;
 }

-void __weak __meminit pmd_init(void *addr)
-{
-}
-
 static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
                                 unsigned long end)
 {
@@ -181,10 +179,6 @@ static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
         return 0;
 }

-void __weak __meminit pud_init(void *addr)
-{
-}
-
 static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
                                 unsigned long end)
 {
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index fb2b9ac0659a..129178be5e64 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -501,18 +501,18 @@ static inline bool kasan_byte_accessible(const void *addr)

 /**
  * kasan_poison - mark the memory range as inaccessible
- * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE
- * @size - range size, must be aligned to KASAN_GRANULE_SIZE
- * @value - value that's written to metadata for the range
- * @init - whether to initialize the memory range (only for hardware tag-based)
+ * @addr: range start address, must be aligned to KASAN_GRANULE_SIZE
+ * @size: range size, must be aligned to KASAN_GRANULE_SIZE
+ * @value: value that's written to metadata for the range
+ * @init: whether to initialize the memory range (only for hardware tag-based)
  */
 void kasan_poison(const void *addr, size_t size, u8 value, bool init);

 /**
  * kasan_unpoison - mark the memory range as accessible
- * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE
- * @size - range size, can be unaligned
- * @init - whether to initialize the memory range (only for hardware tag-based)
+ * @addr: range start address, must be aligned to KASAN_GRANULE_SIZE
+ * @size: range size, can be unaligned
+ * @init: whether to initialize the memory range (only for hardware tag-based)
  *
  * For the tag-based modes, the @size gets aligned to KASAN_GRANULE_SIZE before
  * marking the range.
@@ -530,8 +530,8 @@ bool kasan_byte_accessible(const void *addr);
 /**
  * kasan_poison_last_granule - mark the last granule of the memory range as
  * inaccessible
- * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE
- * @size - range size
+ * @address: range start address, must be aligned to KASAN_GRANULE_SIZE
+ * @size: range size
  *
  * This function is only available for the generic mode, as it's the only mode
  * that has partially poisoned memory granules.
@@ -555,6 +555,12 @@ static inline bool kasan_arch_is_ready(void) { return true; }
 void kasan_kunit_test_suite_start(void);
 void kasan_kunit_test_suite_end(void);

+#ifdef CONFIG_RUST
+char kasan_test_rust_uaf(void);
+#else
+static inline char kasan_test_rust_uaf(void) { return '\0'; }
+#endif
+
 #else /* CONFIG_KASAN_KUNIT_TEST */

 static inline void kasan_kunit_test_suite_start(void) { }
@@ -562,7 +568,7 @@ static inline void kasan_kunit_test_suite_end(void) { }

 #endif /* CONFIG_KASAN_KUNIT_TEST */

-#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST) || IS_ENABLED(CONFIG_KASAN_MODULE_TEST)
+#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)

 bool kasan_save_enable_multi_shot(void);
 void kasan_restore_multi_shot(bool enabled);
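hw_tags.c (and report.c below) switch from unconditional EXPORT_SYMBOL_GPL() to the <kunit/visibility.h> helpers, which keep internal symbols static unless KUnit is built. The general pattern, sketched on a made-up helper:

/* Sketch of the <kunit/visibility.h> pattern on a made-up symbol. */
#include <kunit/visibility.h>

/* In foo.c: static normally, global and exported only when CONFIG_KUNIT=y. */
VISIBLE_IF_KUNIT bool foo_internal_helper(void)
{
        return true;
}
EXPORT_SYMBOL_IF_KUNIT(foo_internal_helper);

/* In the test module: import the namespace the symbol was exported into. */
MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");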
diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test_c.c
index 7b32be2a3cf0..5f922dd38ffa 100644
--- a/mm/kasan/kasan_test.c
+++ b/mm/kasan/kasan_test_c.c
@@ -33,6 +33,8 @@

 #define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)

+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
 static bool multishot;

 /* Fields set based on lines observed in the console. */
@@ -45,8 +47,8 @@ static struct {
  * Some tests use these global variables to store return values from function
  * calls that could otherwise be eliminated by the compiler as dead code.
  */
-void *kasan_ptr_result;
-int kasan_int_result;
+static volatile void *kasan_ptr_result;
+static volatile int kasan_int_result;

 /* Probe for console output: obtains test_status lines of interest. */
 static void probe_console(void *ignore, const char *buf, size_t len)
@@ -213,6 +215,36 @@ static void kmalloc_node_oob_right(struct kunit *test)
         kfree(ptr);
 }

+static void kmalloc_track_caller_oob_right(struct kunit *test)
+{
+        char *ptr;
+        size_t size = 128 - KASAN_GRANULE_SIZE;
+
+        /*
+         * Check that KASAN detects out-of-bounds access for object allocated via
+         * kmalloc_track_caller().
+         */
+        ptr = kmalloc_track_caller(size, GFP_KERNEL);
+        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+        OPTIMIZER_HIDE_VAR(ptr);
+        KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'y');
+
+        kfree(ptr);
+
+        /*
+         * Check that KASAN detects out-of-bounds access for object allocated via
+         * kmalloc_node_track_caller().
+         */
+        ptr = kmalloc_node_track_caller(size, GFP_KERNEL, 0);
+        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+        OPTIMIZER_HIDE_VAR(ptr);
+        KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'y');
+
+        kfree(ptr);
+}
+
 /*
  * Check that KASAN detects an out-of-bounds access for a big object allocated
  * via kmalloc(). But not as big as to trigger the page_alloc fallback.
@@ -996,14 +1028,56 @@ static void kmem_cache_invalid_free(struct kunit *test)
         kmem_cache_destroy(cache);
 }

-static void empty_cache_ctor(void *object) { }
+static void kmem_cache_rcu_uaf(struct kunit *test)
+{
+        char *p;
+        size_t size = 200;
+        struct kmem_cache *cache;
+
+        KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB_RCU_DEBUG);
+
+        cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
+                                  NULL);
+        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
+        p = kmem_cache_alloc(cache, GFP_KERNEL);
+        if (!p) {
+                kunit_err(test, "Allocation failed: %s\n", __func__);
+                kmem_cache_destroy(cache);
+                return;
+        }
+        *p = 1;
+
+        rcu_read_lock();
+
+        /* Free the object - this will internally schedule an RCU callback. */
+        kmem_cache_free(cache, p);
+
+        /*
+         * We should still be allowed to access the object at this point because
+         * the cache is SLAB_TYPESAFE_BY_RCU and we've been in an RCU read-side
+         * critical section since before the kmem_cache_free().
+         */
+        READ_ONCE(*p);
+
+        rcu_read_unlock();
+
+        /*
+         * Wait for the RCU callback to execute; after this, the object should
+         * have actually been freed from KASAN's perspective.
+         */
+        rcu_barrier();
+
+        KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*p));
+
+        kmem_cache_destroy(cache);
+}

 static void kmem_cache_double_destroy(struct kunit *test)
 {
         struct kmem_cache *cache;

-        /* Provide a constructor to prevent cache merging. */
-        cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
+        cache = kmem_cache_create("test_cache", 200, 0, SLAB_NO_MERGE, NULL);
         KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
         kmem_cache_destroy(cache);
         KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
@@ -1493,6 +1567,7 @@ static void kasan_memcmp(struct kunit *test)
 static void kasan_strings(struct kunit *test)
 {
         char *ptr;
+        char *src;
         size_t size = 24;

         /*
@@ -1504,6 +1579,25 @@ static void kasan_strings(struct kunit *test)
         ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
         KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

+        src = kmalloc(KASAN_GRANULE_SIZE, GFP_KERNEL | __GFP_ZERO);
+        strscpy(src, "f0cacc1a0000000", KASAN_GRANULE_SIZE);
+
+        /*
+         * Make sure that strscpy() does not trigger KASAN if it overreads into
+         * poisoned memory.
+         *
+         * The expected size does not include the terminator '\0'
+         * so it is (KASAN_GRANULE_SIZE - 2) ==
+         * KASAN_GRANULE_SIZE - ("initial removed character" + "\0").
+         */
+        KUNIT_EXPECT_EQ(test, KASAN_GRANULE_SIZE - 2,
+                        strscpy(ptr, src + 1, KASAN_GRANULE_SIZE));
+
+        /* strscpy should fail if the first byte is unreadable. */
+        KUNIT_EXPECT_KASAN_FAIL(test, strscpy(ptr, src + KASAN_GRANULE_SIZE,
+                                              KASAN_GRANULE_SIZE));
+
+        kfree(src);
         kfree(ptr);

         /*
@@ -1765,32 +1859,6 @@ static void vm_map_ram_tags(struct kunit *test)
         free_pages((unsigned long)p_ptr, 1);
 }

-static void vmalloc_percpu(struct kunit *test)
-{
-        char __percpu *ptr;
-        int cpu;
-
-        /*
-         * This test is specifically crafted for the software tag-based mode,
-         * the only tag-based mode that poisons percpu mappings.
-         */
-        KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);
-
-        ptr = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
-
-        for_each_possible_cpu(cpu) {
-                char *c_ptr = per_cpu_ptr(ptr, cpu);
-
-                KUNIT_EXPECT_GE(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_MIN);
-                KUNIT_EXPECT_LT(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_KERNEL);
-
-                /* Make sure that in-bounds accesses don't crash the kernel. */
-                *c_ptr = 0;
-        }
-
-        free_percpu(ptr);
-}
-
 /*
  * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
  * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
@@ -1899,10 +1967,102 @@ static void match_all_mem_tag(struct kunit *test)
         kfree(ptr);
 }

+/*
+ * Check that Rust performing a use-after-free using `unsafe` is detected.
+ * This is a smoke test to make sure that Rust is being sanitized properly.
+ */
+static void rust_uaf(struct kunit *test)
+{
+        KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_RUST);
+        KUNIT_EXPECT_KASAN_FAIL(test, kasan_test_rust_uaf());
+}
+
+static void copy_to_kernel_nofault_oob(struct kunit *test)
+{
+        char *ptr;
+        char buf[128];
+        size_t size = sizeof(buf);
+
+        /*
+         * This test currently fails with the HW_TAGS mode. The reason is
+         * unknown and needs to be investigated.
+         */
+        KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);
+
+        ptr = kmalloc(size - KASAN_GRANULE_SIZE, GFP_KERNEL);
+        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+        OPTIMIZER_HIDE_VAR(ptr);
+
+        /*
+         * We test copy_to_kernel_nofault() to detect corrupted memory that is
+         * being written into the kernel. In contrast,
+         * copy_from_kernel_nofault() is primarily used in kernel helper
+         * functions where the source address might be random or uninitialized.
+         * Applying KASAN instrumentation to copy_from_kernel_nofault() could
+         * lead to false positives. By focusing KASAN checks only on
+         * copy_to_kernel_nofault(), we ensure that only valid memory is
+         * written to the kernel, minimizing the risk of kernel corruption
+         * while avoiding false positives in the reverse case.
+         */
+        KUNIT_EXPECT_KASAN_FAIL(test,
+                copy_to_kernel_nofault(&buf[0], ptr, size));
+        KUNIT_EXPECT_KASAN_FAIL(test,
+                copy_to_kernel_nofault(ptr, &buf[0], size));
+
+        kfree(ptr);
+}
+
+static void copy_user_test_oob(struct kunit *test)
+{
+        char *kmem;
+        char __user *usermem;
+        unsigned long useraddr;
+        size_t size = 128 - KASAN_GRANULE_SIZE;
+        int __maybe_unused unused;
+
+        kmem = kunit_kmalloc(test, size, GFP_KERNEL);
+        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, kmem);
+
+        useraddr = kunit_vm_mmap(test, NULL, 0, PAGE_SIZE,
+                                 PROT_READ | PROT_WRITE | PROT_EXEC,
+                                 MAP_ANONYMOUS | MAP_PRIVATE, 0);
+        KUNIT_ASSERT_NE_MSG(test, useraddr, 0,
+                            "Could not create userspace mm");
+        KUNIT_ASSERT_LT_MSG(test, useraddr, (unsigned long)TASK_SIZE,
+                            "Failed to allocate user memory");
+
+        OPTIMIZER_HIDE_VAR(size);
+        usermem = (char __user *)useraddr;
+
+        KUNIT_EXPECT_KASAN_FAIL(test,
+                unused = copy_from_user(kmem, usermem, size + 1));
+        KUNIT_EXPECT_KASAN_FAIL(test,
+                unused = copy_to_user(usermem, kmem, size + 1));
+        KUNIT_EXPECT_KASAN_FAIL(test,
+                unused = __copy_from_user(kmem, usermem, size + 1));
+        KUNIT_EXPECT_KASAN_FAIL(test,
+                unused = __copy_to_user(usermem, kmem, size + 1));
+        KUNIT_EXPECT_KASAN_FAIL(test,
+                unused = __copy_from_user_inatomic(kmem, usermem, size + 1));
+        KUNIT_EXPECT_KASAN_FAIL(test,
+                unused = __copy_to_user_inatomic(usermem, kmem, size + 1));
+
+        /*
+         * Prepare a long string in usermem to avoid the strncpy_from_user test
+         * bailing out on '\0' before it reaches out-of-bounds.
+         */
+        memset(kmem, 'a', size);
+        KUNIT_EXPECT_EQ(test, copy_to_user(usermem, kmem, size), 0);
+
+        KUNIT_EXPECT_KASAN_FAIL(test,
+                unused = strncpy_from_user(kmem, usermem, size + 1));
+}
+
 static struct kunit_case kasan_kunit_test_cases[] = {
         KUNIT_CASE(kmalloc_oob_right),
         KUNIT_CASE(kmalloc_oob_left),
         KUNIT_CASE(kmalloc_node_oob_right),
+        KUNIT_CASE(kmalloc_track_caller_oob_right),
         KUNIT_CASE(kmalloc_big_oob_right),
         KUNIT_CASE(kmalloc_large_oob_right),
         KUNIT_CASE(kmalloc_large_uaf),
@@ -1937,6 +2097,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
         KUNIT_CASE(kmem_cache_oob),
         KUNIT_CASE(kmem_cache_double_free),
         KUNIT_CASE(kmem_cache_invalid_free),
+        KUNIT_CASE(kmem_cache_rcu_uaf),
         KUNIT_CASE(kmem_cache_double_destroy),
         KUNIT_CASE(kmem_cache_accounted),
         KUNIT_CASE(kmem_cache_bulk),
@@ -1962,15 +2123,17 @@ static struct kunit_case kasan_kunit_test_cases[] = {
         KUNIT_CASE(kasan_strings),
         KUNIT_CASE(kasan_bitops_generic),
         KUNIT_CASE(kasan_bitops_tags),
-        KUNIT_CASE(kasan_atomics),
+        KUNIT_CASE_SLOW(kasan_atomics),
         KUNIT_CASE(vmalloc_helpers_tags),
         KUNIT_CASE(vmalloc_oob),
         KUNIT_CASE(vmap_tags),
         KUNIT_CASE(vm_map_ram_tags),
-        KUNIT_CASE(vmalloc_percpu),
         KUNIT_CASE(match_all_not_assigned),
         KUNIT_CASE(match_all_ptr_tag),
         KUNIT_CASE(match_all_mem_tag),
+        KUNIT_CASE(copy_to_kernel_nofault_oob),
+        KUNIT_CASE(rust_uaf),
+        KUNIT_CASE(copy_user_test_oob),
         {}
 };

@@ -1984,4 +2147,5 @@ static struct kunit_suite kasan_kunit_test_suite = {

 kunit_test_suite(kasan_kunit_test_suite);

+MODULE_DESCRIPTION("KUnit tests for checking KASAN bug-detection capabilities");
 MODULE_LICENSE("GPL");
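The copy_to_kernel_nofault_oob() test leans on the _nofault semantics: a faulting copy returns an error instead of oopsing, so KASAN's check on the destination is the only thing expected to fire. An illustrative sketch of those semantics (not from the patch):

#include <linux/uaccess.h>

/*
 * Illustrative sketch: copy_to_kernel_nofault() returns an error on a
 * faulting access instead of crashing. KASAN instruments only this
 * write side; copy_from_kernel_nofault() is deliberately left
 * unchecked because its source address is often arbitrary, which would
 * cause false positives.
 */
static int example_probe_write(void *dst, const void *src, size_t size)
{
        long ret = copy_to_kernel_nofault(dst, src, size);

        return ret ? -EFAULT : 0;
}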
diff --git a/mm/kasan/kasan_test_module.c b/mm/kasan/kasan_test_module.c
deleted file mode 100644
index 27ec22767e42..000000000000
--- a/mm/kasan/kasan_test_module.c
+++ /dev/null
@@ -1,81 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- *
- * Copyright (c) 2014 Samsung Electronics Co., Ltd.
- * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
- */
-
-#define pr_fmt(fmt) "kasan: test: " fmt
-
-#include <linux/mman.h>
-#include <linux/module.h>
-#include <linux/printk.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-
-#include "kasan.h"
-
-static noinline void __init copy_user_test(void)
-{
-        char *kmem;
-        char __user *usermem;
-        size_t size = 128 - KASAN_GRANULE_SIZE;
-        int __maybe_unused unused;
-
-        kmem = kmalloc(size, GFP_KERNEL);
-        if (!kmem)
-                return;
-
-        usermem = (char __user *)vm_mmap(NULL, 0, PAGE_SIZE,
-                            PROT_READ | PROT_WRITE | PROT_EXEC,
-                            MAP_ANONYMOUS | MAP_PRIVATE, 0);
-        if (IS_ERR(usermem)) {
-                pr_err("Failed to allocate user memory\n");
-                kfree(kmem);
-                return;
-        }
-
-        OPTIMIZER_HIDE_VAR(size);
-
-        pr_info("out-of-bounds in copy_from_user()\n");
-        unused = copy_from_user(kmem, usermem, size + 1);
-
-        pr_info("out-of-bounds in copy_to_user()\n");
-        unused = copy_to_user(usermem, kmem, size + 1);
-
-        pr_info("out-of-bounds in __copy_from_user()\n");
-        unused = __copy_from_user(kmem, usermem, size + 1);
-
-        pr_info("out-of-bounds in __copy_to_user()\n");
-        unused = __copy_to_user(usermem, kmem, size + 1);
-
-        pr_info("out-of-bounds in __copy_from_user_inatomic()\n");
-        unused = __copy_from_user_inatomic(kmem, usermem, size + 1);
-
-        pr_info("out-of-bounds in __copy_to_user_inatomic()\n");
-        unused = __copy_to_user_inatomic(usermem, kmem, size + 1);
-
-        pr_info("out-of-bounds in strncpy_from_user()\n");
-        unused = strncpy_from_user(kmem, usermem, size + 1);
-
-        vm_munmap((unsigned long)usermem, PAGE_SIZE);
-        kfree(kmem);
-}
-
-static int __init kasan_test_module_init(void)
-{
-        /*
-         * Temporarily enable multi-shot mode. Otherwise, KASAN would only
-         * report the first detected bug and panic the kernel if panic_on_warn
-         * is enabled.
-         */
-        bool multishot = kasan_save_enable_multi_shot();
-
-        copy_user_test();
-
-        kasan_restore_multi_shot(multishot);
-        return -EAGAIN;
-}
-
-module_init(kasan_test_module_init);
-MODULE_LICENSE("GPL");
diff --git a/mm/kasan/kasan_test_rust.rs b/mm/kasan/kasan_test_rust.rs
new file mode 100644
index 000000000000..5b34edf30e72
--- /dev/null
+++ b/mm/kasan/kasan_test_rust.rs
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Helper crate for KASAN testing.
+//!
+//! Provides behavior to check the sanitization of Rust code.
+
+use core::ptr::addr_of_mut;
+use kernel::prelude::*;
+
+/// Trivial UAF - allocate a big vector, grab a pointer partway through,
+/// drop the vector, and touch it.
+#[no_mangle]
+pub extern "C" fn kasan_test_rust_uaf() -> u8 {
+    let mut v: KVec<u8> = KVec::new();
+    for _ in 0..4096 {
+        v.push(0x42, GFP_KERNEL).unwrap();
+    }
+    let ptr: *mut u8 = addr_of_mut!(v[2048]);
+    drop(v);
+    // SAFETY: Incorrect, on purpose.
+    unsafe { *ptr }
+}
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index b48c768acc84..62c01b4527eb 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -10,6 +10,7 @@
  */

 #include <kunit/test.h>
+#include <kunit/visibility.h>
 #include <linux/bitops.h>
 #include <linux/ftrace.h>
 #include <linux/init.h>
@@ -132,20 +133,20 @@ static bool report_enabled(void)
         return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags);
 }

-#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST) || IS_ENABLED(CONFIG_KASAN_MODULE_TEST)
+#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)

-bool kasan_save_enable_multi_shot(void)
+VISIBLE_IF_KUNIT bool kasan_save_enable_multi_shot(void)
 {
         return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
 }
-EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot);
+EXPORT_SYMBOL_IF_KUNIT(kasan_save_enable_multi_shot);

-void kasan_restore_multi_shot(bool enabled)
+VISIBLE_IF_KUNIT void kasan_restore_multi_shot(bool enabled)
 {
         if (!enabled)
                 clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
 }
-EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
+EXPORT_SYMBOL_IF_KUNIT(kasan_restore_multi_shot);

 #endif

@@ -157,17 +158,17 @@ EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
  */
 static bool kasan_kunit_executing;

-void kasan_kunit_test_suite_start(void)
+VISIBLE_IF_KUNIT void kasan_kunit_test_suite_start(void)
 {
         WRITE_ONCE(kasan_kunit_executing, true);
 }
-EXPORT_SYMBOL_GPL(kasan_kunit_test_suite_start);
+EXPORT_SYMBOL_IF_KUNIT(kasan_kunit_test_suite_start);

-void kasan_kunit_test_suite_end(void)
+VISIBLE_IF_KUNIT void kasan_kunit_test_suite_end(void)
 {
         WRITE_ONCE(kasan_kunit_executing, false);
 }
-EXPORT_SYMBOL_GPL(kasan_kunit_test_suite_end);
+EXPORT_SYMBOL_IF_KUNIT(kasan_kunit_test_suite_end);

 static bool kasan_kunit_test_suite_executing(void)
 {
@@ -200,7 +201,7 @@ static inline void fail_non_kasan_kunit_test(void) { }

 #endif /* CONFIG_KUNIT */

-static DEFINE_SPINLOCK(report_lock);
+static DEFINE_RAW_SPINLOCK(report_lock);

 static void start_report(unsigned long *flags, bool sync)
 {
@@ -211,7 +212,7 @@ static void start_report(unsigned long *flags, bool sync)
         lockdep_off();
         /* Make sure we don't end up in loop. */
         report_suppress_start();
-        spin_lock_irqsave(&report_lock, *flags);
+        raw_spin_lock_irqsave(&report_lock, *flags);
         pr_err("==================================================================\n");
 }

@@ -221,7 +222,7 @@ static void end_report(unsigned long *flags, const void *addr, bool is_write)
                 trace_error_report_end(ERROR_DETECTOR_KASAN, (unsigned long)addr);
         pr_err("==================================================================\n");
-        spin_unlock_irqrestore(&report_lock, *flags);
+        raw_spin_unlock_irqrestore(&report_lock, *flags);
         if (!test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
                 check_panic_on_warn("KASAN");
         switch (kasan_arg_fault) {
@@ -398,17 +399,10 @@ static void print_address_description(void *addr, u8 tag,
         }

         if (is_vmalloc_addr(addr)) {
-                struct vm_struct *va = find_vm_area(addr);
-
-                if (va) {
-                        pr_err("The buggy address belongs to the virtual mapping at\n"
-                               " [%px, %px) created by:\n"
-                               " %pS\n",
-                               va->addr, va->addr + va->size, va->caller);
-                        pr_err("\n");
-
-                        page = vmalloc_to_page(addr);
-                }
+                pr_err("The buggy address belongs to a");
+                if (!vmalloc_dump_obj(addr))
+                        pr_cont(" vmalloc virtual mapping\n");
+                page = vmalloc_to_page(addr);
         }

         if (page) {
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index d6210ca48dda..d2c70cd2afb1 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -292,33 +292,99 @@ void __init __weak kasan_populate_early_vm_area_shadow(void *start,
 {
 }

+struct vmalloc_populate_data {
+        unsigned long start;
+        struct page **pages;
+};
+
 static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
-                                      void *unused)
+                                      void *_data)
 {
-        unsigned long page;
+        struct vmalloc_populate_data *data = _data;
+        struct page *page;
         pte_t pte;
+        int index;

         if (likely(!pte_none(ptep_get(ptep))))
                 return 0;

-        page = __get_free_page(GFP_KERNEL);
-        if (!page)
-                return -ENOMEM;
-
-        __memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
-        pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);
+        index = PFN_DOWN(addr - data->start);
+        page = data->pages[index];
+        __memset(page_to_virt(page), KASAN_VMALLOC_INVALID, PAGE_SIZE);
+        pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);

         spin_lock(&init_mm.page_table_lock);
         if (likely(pte_none(ptep_get(ptep)))) {
                 set_pte_at(&init_mm, addr, ptep, pte);
-                page = 0;
+                data->pages[index] = NULL;
         }
         spin_unlock(&init_mm.page_table_lock);
-        if (page)
-                free_page(page);
+
         return 0;
 }

+static void ___free_pages_bulk(struct page **pages, int nr_pages)
+{
+        int i;
+
+        for (i = 0; i < nr_pages; i++) {
+                if (pages[i]) {
+                        __free_pages(pages[i], 0);
+                        pages[i] = NULL;
+                }
+        }
+}
+
+static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
+{
+        unsigned long nr_populated, nr_total = nr_pages;
+        struct page **page_array = pages;
+
+        while (nr_pages) {
+                nr_populated = alloc_pages_bulk(GFP_KERNEL, nr_pages, pages);
+                if (!nr_populated) {
+                        ___free_pages_bulk(page_array, nr_total - nr_pages);
+                        return -ENOMEM;
+                }
+                pages += nr_populated;
+                nr_pages -= nr_populated;
+        }
+
+        return 0;
+}
+
+static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
+{
+        unsigned long nr_pages, nr_total = PFN_UP(end - start);
+        struct vmalloc_populate_data data;
+        int ret = 0;
+
+        data.pages = (struct page **)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+        if (!data.pages)
+                return -ENOMEM;
+
+        while (nr_total) {
+                nr_pages = min(nr_total, PAGE_SIZE / sizeof(data.pages[0]));
+                ret = ___alloc_pages_bulk(data.pages, nr_pages);
+                if (ret)
+                        break;
+
+                data.start = start;
+                ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
+                                          kasan_populate_vmalloc_pte, &data);
+                ___free_pages_bulk(data.pages, nr_pages);
+                if (ret)
+                        break;
+
+                start += nr_pages * PAGE_SIZE;
+                nr_total -= nr_pages;
+        }
+
+        free_page((unsigned long)data.pages);
+
+        return ret;
+}
+
 int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
 {
         unsigned long shadow_start, shadow_end;
@@ -348,9 +414,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
         shadow_start = PAGE_ALIGN_DOWN(shadow_start);
         shadow_end = PAGE_ALIGN(shadow_end);

-        ret = apply_to_page_range(&init_mm, shadow_start,
-                                  shadow_end - shadow_start,
-                                  kasan_populate_vmalloc_pte, NULL);
+        ret = __kasan_populate_vmalloc(shadow_start, shadow_end);
         if (ret)
                 return ret;

@@ -489,7 +553,8 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
  */
 void kasan_release_vmalloc(unsigned long start, unsigned long end,
                            unsigned long free_region_start,
-                           unsigned long free_region_end)
+                           unsigned long free_region_end,
+                           unsigned long flags)
 {
         void *shadow_start, *shadow_end;
         unsigned long region_start, region_end;
@@ -522,12 +587,17 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
                         __memset(shadow_start, KASAN_SHADOW_INIT, shadow_end - shadow_start);
                         return;
                 }
-                apply_to_existing_page_range(&init_mm,
+
+                if (flags & KASAN_VMALLOC_PAGE_RANGE)
+                        apply_to_existing_page_range(&init_mm,
                                         (unsigned long)shadow_start,
                                         size, kasan_depopulate_vmalloc_pte,
                                         NULL);
-                flush_tlb_kernel_range((unsigned long)shadow_start,
-                                       (unsigned long)shadow_end);
+
+                if (flags & KASAN_VMALLOC_TLB_FLUSH)
+                        flush_tlb_kernel_range((unsigned long)shadow_start,
+                                               (unsigned long)shadow_end);
         }
 }
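kasan_release_vmalloc() now takes a flags argument so callers can separate page-table teardown from TLB flushing and batch the flush across many freed regions. A hypothetical caller sketch; the flag names come from this patch, but the batching caller and its signature are assumptions:

#include <linux/kasan.h>

/*
 * Hypothetical caller: tear down the shadow page tables for each freed
 * region without flushing, then issue a single TLB flush for the batch.
 */
static void example_release_batch(unsigned long starts[], unsigned long ends[],
                                  int nr, unsigned long span_start,
                                  unsigned long span_end)
{
        int i;

        for (i = 0; i < nr; i++)
                kasan_release_vmalloc(starts[i], ends[i], span_start,
                                      span_end, KASAN_VMALLOC_PAGE_RANGE);

        /* One flush covering the whole span, instead of one per region. */
        kasan_release_vmalloc(span_start, span_end, span_start, span_end,
                              KASAN_VMALLOC_TLB_FLUSH);
}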
diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c
index 220b5d4c6876..b9382b5b6a37 100644
--- a/mm/kasan/sw_tags.c
+++ b/mm/kasan/sw_tags.c
@@ -26,6 +26,7 @@
 #include <linux/slab.h>
 #include <linux/stacktrace.h>
 #include <linux/string.h>
+#include <linux/string_choices.h>
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 #include <linux/bug.h>
@@ -45,7 +46,7 @@ void __init kasan_init_sw_tags(void)
         kasan_init_tags();

         pr_info("KernelAddressSanitizer initialized (sw-tags, stacktrace=%s)\n",
-                kasan_stack_collection_enabled() ? "on" : "off");
+                str_on_off(kasan_stack_collection_enabled()));
 }