 arch/um/Kconfig                  | 15 +++++++++
 arch/um/include/asm/common.lds.S |  2 +
 arch/um/include/asm/kasan.h      | 37 +++++++++++++++++++
 arch/um/kernel/dyn.lds.S         |  6 ++-
 arch/um/kernel/mem.c             | 19 ++++++++++
 arch/um/kernel/stacktrace.c      |  2 +-
 arch/um/os-Linux/mem.c           | 22 ++++++++++++
 arch/um/os-Linux/user_syms.c     |  4 ++--
 arch/x86/um/Makefile             |  3 ++-
 arch/x86/um/vdso/Makefile        |  3 +++
 mm/kasan/shadow.c                | 29 ++++++++++++---
 11 files changed, 135 insertions(+), 7 deletions(-)
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 7b0f953e7d6c..78de31ac1da7 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -12,6 +12,8 @@ config UML
select ARCH_HAS_STRNLEN_USER
select ARCH_NO_PREEMPT
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_KASAN if X86_64
+ select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ASM_MODVERSIONS
select HAVE_UID16
@@ -219,6 +221,19 @@ config UML_TIME_TRAVEL_SUPPORT
It is safe to say Y, but you probably don't need this.
+config KASAN_SHADOW_OFFSET
+ hex
+ depends on KASAN
+ default 0x100000000000
+ help
+ This is the offset at which the ~16TB of shadow memory is
+ mapped and used by KASAN for memory debugging. It can be any
+ address with at least KASAN_SHADOW_SIZE (the total address space
+ divided by 8) of free space after it, so that the shadow memory
+ does not conflict with anything else. The default of 0x100000000000
+ works even when "mem=" is set to a large value. On low-memory
+ systems, try 0x7fff8000 instead: it fits into the immediate field
+ of most instructions, which improves performance.
+
endmenu
source "arch/um/drivers/Kconfig"
diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S
index eca6c452a41b..fd481ac371de 100644
--- a/arch/um/include/asm/common.lds.S
+++ b/arch/um/include/asm/common.lds.S
@@ -83,6 +83,8 @@
}
.init_array : {
__init_array_start = .;
+ *(.kasan_init)
+ *(.init_array.*)
*(.init_array)
__init_array_end = .;
}
diff --git a/arch/um/include/asm/kasan.h b/arch/um/include/asm/kasan.h
new file mode 100644
index 000000000000..0d6547f4ec85
--- /dev/null
+++ b/arch/um/include/asm/kasan.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_UM_KASAN_H
+#define __ASM_UM_KASAN_H
+
+#include <linux/init.h>
+#include <linux/const.h>
+
+#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+
+/* used in kasan_mem_to_shadow to divide by 8 */
+#define KASAN_SHADOW_SCALE_SHIFT 3
+
+#ifdef CONFIG_X86_64
+#define KASAN_HOST_USER_SPACE_END_ADDR 0x00007fffffffffffUL
+/* KASAN_SHADOW_SIZE is the size of total address space divided by 8 */
+#define KASAN_SHADOW_SIZE ((KASAN_HOST_USER_SPACE_END_ADDR + 1) >> \
+ KASAN_SHADOW_SCALE_SHIFT)
+#else
+#error "KASAN_SHADOW_SIZE is not defined for this sub-architecture"
+#endif /* CONFIG_X86_64 */
+
+#define KASAN_SHADOW_START (KASAN_SHADOW_OFFSET)
+#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
+
+#ifdef CONFIG_KASAN
+void kasan_init(void);
+void kasan_map_memory(void *start, unsigned long len);
+extern int kasan_um_is_ready;
+
+#ifdef CONFIG_STATIC_LINK
+#define kasan_arch_is_ready() (kasan_um_is_ready)
+#endif
+#else
+static inline void kasan_init(void) { }
+#endif /* CONFIG_KASAN */
+
+#endif /* __ASM_UM_KASAN_H */
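To sanity-check the constants above: the host user address space ends at 0x00007fffffffffff, so the shadow region is one eighth of 128 TiB, i.e. 16 TiB, matching the "~16TB" figure in the Kconfig help. A standalone sketch reproducing the header's arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long end   = 0x00007fffffffffffUL; /* KASAN_HOST_USER_SPACE_END_ADDR */
	unsigned long size  = (end + 1) >> 3;       /* KASAN_SHADOW_SIZE */
	unsigned long start = 0x100000000000UL;     /* default KASAN_SHADOW_OFFSET */

	/* size = 0x100000000000 (16 TiB); the shadow ends at 0x200000000000 */
	printf("size %#lx, range %#lx-%#lx\n", size, start, start + size);
	return 0;
}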
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index 2f2a8ce92f1e..2b7fc5b54164 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -109,7 +109,11 @@ SECTIONS
be empty, which isn't pretty. */
. = ALIGN(32 / 8);
.preinit_array : { *(.preinit_array) }
- .init_array : { *(.init_array) }
+ .init_array : {
+ *(.kasan_init)
+ *(.init_array.*)
+ *(.init_array)
+ }
.fini_array : { *(.fini_array) }
.data : {
INIT_TASK_DATA(KERNEL_STACK_SIZE)
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 15295c3237a0..276a1f0b91f1 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -18,6 +18,25 @@
#include <kern_util.h>
#include <mem_user.h>
#include <os.h>
+#include <linux/sched/task.h>
+
+#ifdef CONFIG_KASAN
+int kasan_um_is_ready;
+void kasan_init(void)
+{
+ /*
+ * kasan_map_memory will map all of the required address space and
+ * the host machine will allocate physical memory as necessary.
+ */
+ kasan_map_memory((void *)KASAN_SHADOW_START, KASAN_SHADOW_SIZE);
+ init_task.kasan_depth = 0;
+ kasan_um_is_ready = true;
+}
+
+static void (*kasan_init_ptr)(void)
+__section(".kasan_init") __used
+= kasan_init;
+#endif
/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
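The .kasan_init pointer above, together with the linker-script changes that place *(.kasan_init) at the head of .init_array, guarantees kasan_init() runs before every other constructor, so the shadow is mapped before any instrumented code executes. A runnable userspace analogue of that ordering using GCC constructor priorities (a related but distinct mechanism, shown only to illustrate the idea):

#include <stdio.h>

/* Lower priority numbers run earlier; 101 is the first slot open to applications. */
static void __attribute__((constructor(101))) runs_first(void)
{
	puts("early constructor: shadow would be mapped here");
}

static void __attribute__((constructor)) runs_later(void)
{
	puts("ordinary constructor: instrumented code is now safe");
}

int main(void)
{
	return 0;
}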
diff --git a/arch/um/kernel/stacktrace.c b/arch/um/kernel/stacktrace.c
index 86df52168bd9..fd3b61b3d4d2 100644
--- a/arch/um/kernel/stacktrace.c
+++ b/arch/um/kernel/stacktrace.c
@@ -27,7 +27,7 @@ void dump_trace(struct task_struct *tsk,
frame = (struct stack_frame *)bp;
while (((long) sp & (THREAD_SIZE-1)) != 0) {
- addr = *sp;
+ addr = READ_ONCE_NOCHECK(*sp);
if (__kernel_text_address(addr)) {
reliable = 0;
if ((unsigned long) sp == bp + sizeof(long)) {
diff --git a/arch/um/os-Linux/mem.c b/arch/um/os-Linux/mem.c
index 3c1b77474d2d..8530b2e08604 100644
--- a/arch/um/os-Linux/mem.c
+++ b/arch/um/os-Linux/mem.c
@@ -17,6 +17,28 @@
#include <init.h>
#include <os.h>
+/*
+ * kasan_map_memory - map a zero-filled region for KASAN shadow memory
+ * @start: the start address of the memory to be mapped
+ * @len: the length of the memory to be mapped
+ *
+ * Maps @len bytes at @start; the mapping is zero-filled on success.
+ * Used to map the shadow memory for KASAN in UML.
+ */
+void kasan_map_memory(void *start, size_t len)
+{
+ if (mmap(start,
+ len,
+ PROT_READ|PROT_WRITE,
+ MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE,
+ -1,
+ 0) == MAP_FAILED) {
+ os_info("Couldn't allocate shadow memory: %s\n.",
+ strerror(errno));
+ exit(1);
+ }
+}
+
/* Set by make_tempfile() during early boot. */
static char *tempdir = NULL;
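kasan_map_memory() leans on MAP_NORESERVE: reserving ~16 TiB of shadow address space costs no physical memory or swap up front, and the host backs pages only when they are first touched. A standalone sketch of the same reservation pattern; unlike the patch, it maps at a host-chosen address rather than MAP_FIXED at the shadow start:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1UL << 40; /* reserve 1 TiB of address space */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);

	if (p == MAP_FAILED) {
		fprintf(stderr, "mmap: %s\n", strerror(errno));
		return 1;
	}
	p[0] = 1; /* only this touched page gets physical backing */
	printf("reserved %zu bytes at %p\n", len, (void *)p);
	return 0;
}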
diff --git a/arch/um/os-Linux/user_syms.c b/arch/um/os-Linux/user_syms.c
index 715594fe5719..cb667c9225ab 100644
--- a/arch/um/os-Linux/user_syms.c
+++ b/arch/um/os-Linux/user_syms.c
@@ -27,10 +27,10 @@ EXPORT_SYMBOL(strstr);
#ifndef __x86_64__
extern void *memcpy(void *, const void *, size_t);
EXPORT_SYMBOL(memcpy);
-#endif
-
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memset);
+#endif
+
EXPORT_SYMBOL(printf);
/* Here, instead, I can provide a fake prototype. Yes, someone cares: genksyms.
diff --git a/arch/x86/um/Makefile b/arch/x86/um/Makefile
index ba5789c35809..f778e37494ba 100644
--- a/arch/x86/um/Makefile
+++ b/arch/x86/um/Makefile
@@ -28,7 +28,8 @@ else
obj-y += syscalls_64.o vdso/
-subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../entry/thunk_64.o
+subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../entry/thunk_64.o \
+ ../lib/memmove_64.o ../lib/memset_64.o
endif
diff --git a/arch/x86/um/vdso/Makefile b/arch/x86/um/vdso/Makefile
index 5943387e3f35..8c0396fd0e6f 100644
--- a/arch/x86/um/vdso/Makefile
+++ b/arch/x86/um/vdso/Makefile
@@ -3,6 +3,9 @@
# Building vDSO images for x86.
#
+# Do not instrument the vDSO: KASAN is not compatible with user-mode code.
+KASAN_SANITIZE := n
+
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
KCOV_INSTRUMENT := n
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index a4f07de21771..0e3648b603a6 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -295,9 +295,22 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
return 0;
shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
- shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);
- shadow_end = ALIGN(shadow_end, PAGE_SIZE);
+
+ /*
+ * User Mode Linux maps enough shadow memory for all of virtual memory
+ * at boot, so doesn't need to allocate more on vmalloc, just clear it.
+ *
+ * The remaining CONFIG_UML checks in this file exist for the same
+ * reason.
+ */
+ if (IS_ENABLED(CONFIG_UML)) {
+ __memset((void *)shadow_start, KASAN_VMALLOC_INVALID, shadow_end - shadow_start);
+ return 0;
+ }
+
+ shadow_start = PAGE_ALIGN_DOWN(shadow_start);
+ shadow_end = PAGE_ALIGN(shadow_end);
ret = apply_to_page_range(&init_mm, shadow_start,
shadow_end - shadow_start,
@@ -466,6 +479,10 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
if (shadow_end > shadow_start) {
size = shadow_end - shadow_start;
+ if (IS_ENABLED(CONFIG_UML)) {
+ __memset(shadow_start, KASAN_SHADOW_INIT, shadow_end - shadow_start);
+ return;
+ }
apply_to_existing_page_range(&init_mm,
(unsigned long)shadow_start,
size, kasan_depopulate_vmalloc_pte,
@@ -531,6 +548,11 @@ int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask)
if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
return -EINVAL;
+ if (IS_ENABLED(CONFIG_UML)) {
+ __memset((void *)shadow_start, KASAN_SHADOW_INIT, shadow_size);
+ return 0;
+ }
+
ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
shadow_start + shadow_size,
GFP_KERNEL,
@@ -554,6 +576,9 @@ int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask)
void kasan_free_module_shadow(const struct vm_struct *vm)
{
+ if (IS_ENABLED(CONFIG_UML))
+ return;
+
if (vm->flags & VM_KASAN)
vfree(kasan_mem_to_shadow(vm->addr));
}
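Since UML maps the whole shadow region at boot, populating or releasing vmalloc shadow reduces to writing poison bytes directly, as the CONFIG_UML branches above do. A toy sketch of that pattern, assuming the shadow region is already mapped as in kasan_map_memory() (the constants mirror generic KASAN; the helper names are illustrative):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define SHADOW_OFFSET      0x100000000000UL
#define SHADOW_SCALE_SHIFT 3
#define VMALLOC_INVALID    0xF8 /* KASAN_VMALLOC_INVALID in generic mode */

static uint8_t *mem_to_shadow(uintptr_t addr)
{
	return (uint8_t *)((addr >> SHADOW_SCALE_SHIFT) + SHADOW_OFFSET);
}

/* Mark [addr, addr + size) invalid by writing one shadow byte per 8 bytes. */
static void poison_range(uintptr_t addr, size_t size)
{
	memset(mem_to_shadow(addr), VMALLOC_INVALID,
	       mem_to_shadow(addr + size) - mem_to_shadow(addr));
}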