Diffstat (limited to 'mm/kasan/report.c')
-rw-r--r--	mm/kasan/report.c	93
1 file changed, 53 insertions, 40 deletions
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index ca4b6ff080a6..62c01b4527eb 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -10,6 +10,7 @@
  */
 
 #include <kunit/test.h>
+#include <kunit/visibility.h>
 #include <linux/bitops.h>
 #include <linux/ftrace.h>
 #include <linux/init.h>
@@ -23,6 +24,7 @@
 #include <linux/stacktrace.h>
 #include <linux/string.h>
 #include <linux/types.h>
+#include <linux/vmalloc.h>
 #include <linux/kasan.h>
 #include <linux/module.h>
 #include <linux/sched/task_stack.h>
@@ -131,20 +133,20 @@ static bool report_enabled(void)
 	return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags);
 }
 
-#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST) || IS_ENABLED(CONFIG_KASAN_MODULE_TEST)
+#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
 
-bool kasan_save_enable_multi_shot(void)
+VISIBLE_IF_KUNIT bool kasan_save_enable_multi_shot(void)
 {
 	return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
 }
-EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot);
+EXPORT_SYMBOL_IF_KUNIT(kasan_save_enable_multi_shot);
 
-void kasan_restore_multi_shot(bool enabled)
+VISIBLE_IF_KUNIT void kasan_restore_multi_shot(bool enabled)
 {
 	if (!enabled)
 		clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
 }
-EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
+EXPORT_SYMBOL_IF_KUNIT(kasan_restore_multi_shot);
 
 #endif
 
@@ -156,17 +158,17 @@ EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
  */
 static bool kasan_kunit_executing;
 
-void kasan_kunit_test_suite_start(void)
+VISIBLE_IF_KUNIT void kasan_kunit_test_suite_start(void)
 {
 	WRITE_ONCE(kasan_kunit_executing, true);
 }
-EXPORT_SYMBOL_GPL(kasan_kunit_test_suite_start);
+EXPORT_SYMBOL_IF_KUNIT(kasan_kunit_test_suite_start);
 
-void kasan_kunit_test_suite_end(void)
+VISIBLE_IF_KUNIT void kasan_kunit_test_suite_end(void)
 {
 	WRITE_ONCE(kasan_kunit_executing, false);
 }
-EXPORT_SYMBOL_GPL(kasan_kunit_test_suite_end);
+EXPORT_SYMBOL_IF_KUNIT(kasan_kunit_test_suite_end);
 
 static bool kasan_kunit_test_suite_executing(void)
 {
@@ -199,7 +201,7 @@ static inline void fail_non_kasan_kunit_test(void) { }
 
 #endif /* CONFIG_KUNIT */
 
-static DEFINE_SPINLOCK(report_lock);
+static DEFINE_RAW_SPINLOCK(report_lock);
 
 static void start_report(unsigned long *flags, bool sync)
 {
@@ -210,7 +212,7 @@ static void start_report(unsigned long *flags, bool sync)
 	lockdep_off();
 	/* Make sure we don't end up in loop. */
 	report_suppress_start();
-	spin_lock_irqsave(&report_lock, *flags);
+	raw_spin_lock_irqsave(&report_lock, *flags);
 	pr_err("==================================================================\n");
 }
 
@@ -220,7 +222,7 @@ static void end_report(unsigned long *flags, const void *addr, bool is_write)
 		trace_error_report_end(ERROR_DETECTOR_KASAN,
 				       (unsigned long)addr);
 	pr_err("==================================================================\n");
-	spin_unlock_irqrestore(&report_lock, *flags);
+	raw_spin_unlock_irqrestore(&report_lock, *flags);
 	if (!test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
 		check_panic_on_warn("KASAN");
 	switch (kasan_arg_fault) {
@@ -262,7 +264,19 @@ static void print_error_description(struct kasan_report_info *info)
 
 static void print_track(struct kasan_track *track, const char *prefix)
 {
+#ifdef CONFIG_KASAN_EXTRA_INFO
+	u64 ts_nsec = track->timestamp;
+	unsigned long rem_usec;
+
+	ts_nsec <<= 9;
+	rem_usec = do_div(ts_nsec, NSEC_PER_SEC) / 1000;
+
+	pr_err("%s by task %u on cpu %d at %lu.%06lus:\n",
+			prefix, track->pid, track->cpu,
+			(unsigned long)ts_nsec, rem_usec);
+#else
 	pr_err("%s by task %u:\n", prefix, track->pid);
+#endif /* CONFIG_KASAN_EXTRA_INFO */
 	if (track->stack)
 		stack_depot_print(track->stack);
 	else
@@ -385,17 +399,10 @@ static void print_address_description(void *addr, u8 tag,
 	}
 
 	if (is_vmalloc_addr(addr)) {
-		struct vm_struct *va = find_vm_area(addr);
-
-		if (va) {
-			pr_err("The buggy address belongs to the virtual mapping at\n"
-			       " [%px, %px) created by:\n"
-			       " %pS\n",
-			       va->addr, va->addr + va->size, va->caller);
-			pr_err("\n");
-
-			page = vmalloc_to_page(addr);
-		}
+		pr_err("The buggy address belongs to a");
+		if (!vmalloc_dump_obj(addr))
+			pr_cont(" vmalloc virtual mapping\n");
+		page = vmalloc_to_page(addr);
 	}
 
 	if (page) {
@@ -538,7 +545,7 @@ void kasan_report_invalid_free(void *ptr, unsigned long ip, enum kasan_report_ty
 
 	start_report(&flags, true);
 
-	memset(&info, 0, sizeof(info));
+	__memset(&info, 0, sizeof(info));
 	info.type = type;
 	info.access_addr = ptr;
 	info.access_size = 0;
@@ -576,7 +583,7 @@ bool kasan_report(const void *addr, size_t size, bool is_write,
 
 	start_report(&irq_flags, true);
 
-	memset(&info, 0, sizeof(info));
+	__memset(&info, 0, sizeof(info));
 	info.type = KASAN_REPORT_ACCESS;
 	info.access_addr = addr;
 	info.access_size = size;
@@ -621,39 +628,45 @@ void kasan_report_async(void)
 }
 #endif /* CONFIG_KASAN_HW_TAGS */
 
-#ifdef CONFIG_KASAN_INLINE
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 /*
- * With CONFIG_KASAN_INLINE, accesses to bogus pointers (outside the high
- * canonical half of the address space) cause out-of-bounds shadow memory reads
- * before the actual access. For addresses in the low canonical half of the
- * address space, as well as most non-canonical addresses, that out-of-bounds
- * shadow memory access lands in the non-canonical part of the address space.
- * Help the user figure out what the original bogus pointer was.
+ * With compiler-based KASAN modes, accesses to bogus pointers (outside of the
+ * mapped kernel address space regions) cause faults when KASAN tries to check
+ * the shadow memory before the actual memory access. This results in cryptic
+ * GPF reports, which are hard for users to interpret. This hook helps users to
+ * figure out what the original bogus pointer was.
  */
 void kasan_non_canonical_hook(unsigned long addr)
 {
 	unsigned long orig_addr;
 	const char *bug_type;
 
+	/*
+	 * All addresses that came as a result of the memory-to-shadow mapping
+	 * (even for bogus pointers) must be >= KASAN_SHADOW_OFFSET.
+	 */
 	if (addr < KASAN_SHADOW_OFFSET)
 		return;
 
-	orig_addr = (addr - KASAN_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT;
+	orig_addr = (unsigned long)kasan_shadow_to_mem((void *)addr);
+
 	/*
 	 * For faults near the shadow address for NULL, we can be fairly certain
 	 * that this is a KASAN shadow memory access.
-	 * For faults that correspond to shadow for low canonical addresses, we
-	 * can still be pretty sure - that shadow region is a fairly narrow
-	 * chunk of the non-canonical address space.
-	 * But faults that look like shadow for non-canonical addresses are a
-	 * really large chunk of the address space. In that case, we still
-	 * print the decoded address, but make it clear that this is not
-	 * necessarily what's actually going on.
+	 * For faults that correspond to the shadow for low or high canonical
+	 * addresses, we can still be pretty sure: these shadow regions are a
+	 * fairly narrow chunk of the address space.
+	 * But the shadow for non-canonical addresses is a really large chunk
+	 * of the address space. For this case, we still print the decoded
+	 * address, but make it clear that this is not necessarily what's
+	 * actually going on.
 	 */
 	if (orig_addr < PAGE_SIZE)
 		bug_type = "null-ptr-deref";
 	else if (orig_addr < TASK_SIZE)
 		bug_type = "probably user-memory-access";
+	else if (addr_in_shadow((void *)addr))
+		bug_type = "probably wild-memory-access";
 	else
 		bug_type = "maybe wild-memory-access";
 	pr_alert("KASAN: %s in range [0x%016lx-0x%016lx]\n", bug_type,
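
Note on the kasan_non_canonical_hook() hunk above: the compiler-based modes map every kernel address to a shadow byte, and the hook decodes a faulting shadow address back to the original pointer, now via kasan_shadow_to_mem(). The sketch below is plain userspace C, not kernel code; the KASAN_SHADOW_OFFSET value and the scale shift of 3 are the generic-mode x86_64 defaults and are used here only for illustration.

/* Sketch: generic KASAN memory<->shadow mapping (illustrative constants). */
#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT	3			/* one shadow byte covers 8 bytes */
#define KASAN_SHADOW_OFFSET		0xdffffc0000000000UL	/* x86_64 generic-mode value */

static unsigned long mem_to_shadow(unsigned long addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

/* Inverse mapping, i.e. what kasan_shadow_to_mem() computes in the hunk above. */
static unsigned long shadow_to_mem(unsigned long shadow)
{
	return (shadow - KASAN_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT;
}

int main(void)
{
	unsigned long bogus = 0x1238;			/* near-NULL bogus pointer */
	unsigned long shadow = mem_to_shadow(bogus);

	/*
	 * Any address produced by the mapping is >= KASAN_SHADOW_OFFSET, which
	 * is why the hook returns early for smaller fault addresses, and why a
	 * decoded orig_addr below PAGE_SIZE is reported as a null-ptr-deref.
	 * Decoding recovers the address down to 8-byte granularity.
	 */
	printf("shadow for %#lx is %#lx, decodes back to %#lx\n",
	       bogus, shadow, shadow_to_mem(shadow));
	return 0;
}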
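
Similarly, the CONFIG_KASAN_EXTRA_INFO branch added to print_track() implies, via the "ts_nsec <<= 9", that the track stores the nanosecond timestamp shifted right by 9 bits to save space; decoding shifts it back, at 512 ns granularity, and splits it into seconds and microseconds. A minimal userspace sketch of the same arithmetic, with that stored-as-ns>>9 assumption made explicit:

/* Sketch of the print_track() timestamp math (assumes the track stores ns >> 9). */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t now_ns = 1234567891234ULL;	/* example clock value in ns */
	uint64_t stored = now_ns >> 9;		/* what the track field would hold */

	uint64_t ts_nsec = stored << 9;		/* decode: undo the shift (512 ns granularity) */
	unsigned long sec = ts_nsec / NSEC_PER_SEC;
	unsigned long rem_usec = (ts_nsec % NSEC_PER_SEC) / 1000;

	/* Mirrors rem_usec = do_div(ts_nsec, NSEC_PER_SEC) / 1000 in the hunk above. */
	printf("at %lu.%06lus\n", sec, rem_usec);
	return 0;
}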
