Diffstat (limited to 'drivers/misc/lkdtm')
 drivers/misc/lkdtm/Makefile       |  11
 drivers/misc/lkdtm/bugs.c         | 351
 drivers/misc/lkdtm/cfi.c          | 175
 drivers/misc/lkdtm/core.c         | 180
 drivers/misc/lkdtm/fortify.c      | 142
 drivers/misc/lkdtm/heap.c         | 135
 drivers/misc/lkdtm/kstack_erase.c | 150
 drivers/misc/lkdtm/lkdtm.h        | 166
 drivers/misc/lkdtm/perms.c        | 131
 drivers/misc/lkdtm/powerpc.c      |  11
 drivers/misc/lkdtm/refcount.c     |  81
 drivers/misc/lkdtm/stackleak.c    |  82
 drivers/misc/lkdtm/usercopy.c     | 148
13 files changed, 1255 insertions(+), 508 deletions(-)
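Background for the first hunk below: lkdtm gets a live function into .rodata by compiling it into a dedicated text section and then having objcopy rename that section, which is what the OBJCOPYFLAGS_rodata_objcopy.o rule adjusts here (the added `contents` flag keeps the renamed section's bytes in the object). A minimal standalone sketch of the same trick; the file, section, and symbol names are invented for illustration, not the in-tree ones:

/*
 * sketch.c: build with
 *   cc -c sketch.c
 *   objcopy --rename-section .demo.text=.rodata,alloc,readonly,load,contents \
 *           sketch.o sketch_rodata.o
 * The machine code for demo_do_nothing() then lands in .rodata, so it ends
 * up mapped read-only and, on a W^X-enforcing kernel, non-executable;
 * jumping to it should fault, which is what the EXEC_RODATA test relies on.
 */
__attribute__((__section__(".demo.text")))
void demo_do_nothing(void)
{
}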
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile index aa12097668d3..03ebe33185f9 100644 --- a/drivers/misc/lkdtm/Makefile +++ b/drivers/misc/lkdtm/Makefile @@ -8,19 +8,18 @@ lkdtm-$(CONFIG_LKDTM) += perms.o lkdtm-$(CONFIG_LKDTM) += refcount.o lkdtm-$(CONFIG_LKDTM) += rodata_objcopy.o lkdtm-$(CONFIG_LKDTM) += usercopy.o -lkdtm-$(CONFIG_LKDTM) += stackleak.o +lkdtm-$(CONFIG_LKDTM) += kstack_erase.o lkdtm-$(CONFIG_LKDTM) += cfi.o lkdtm-$(CONFIG_LKDTM) += fortify.o -lkdtm-$(CONFIG_PPC_BOOK3S_64) += powerpc.o +lkdtm-$(CONFIG_PPC_64S_HASH_MMU) += powerpc.o -KASAN_SANITIZE_rodata.o := n KASAN_SANITIZE_stackleak.o := n -KCOV_INSTRUMENT_rodata.o := n -CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO) + +CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO) $(RETHUNK_CFLAGS) $(CC_FLAGS_CFI) OBJCOPYFLAGS := OBJCOPYFLAGS_rodata_objcopy.o := \ - --rename-section .noinstr.text=.rodata,alloc,readonly,load + --rename-section .noinstr.text=.rodata,alloc,readonly,load,contents targets += rodata.o rodata_objcopy.o $(obj)/rodata_objcopy.o: $(obj)/rodata.o FORCE $(call if_changed,objcopy) diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c index 88c218a9f8b3..376047beea3d 100644 --- a/drivers/misc/lkdtm/bugs.c +++ b/drivers/misc/lkdtm/bugs.c @@ -6,12 +6,14 @@ * test source files. */ #include "lkdtm.h" +#include <linux/cpu.h> #include <linux/list.h> #include <linux/sched.h> #include <linux/sched/signal.h> #include <linux/sched/task_stack.h> -#include <linux/uaccess.h> #include <linux/slab.h> +#include <linux/stop_machine.h> +#include <linux/uaccess.h> #if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML) #include <asm/desc.h> @@ -29,7 +31,7 @@ struct lkdtm_list { #if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0) #define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2) #else -#define REC_STACK_SIZE (THREAD_SIZE / 8) +#define REC_STACK_SIZE (THREAD_SIZE / 8UL) #endif #define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2) @@ -41,20 +43,22 @@ static DEFINE_SPINLOCK(lock_me_up); * Make sure compiler does not optimize this function or stack frame away: * - function marked noinline * - stack variables are marked volatile - * - stack variables are written (memset()) and read (pr_info()) - * - function has external effects (pr_info()) - * */ + * - stack variables are written (memset()) and read (buf[..] passed as arg) + * - function may have external effects (memzero_explicit()) + * - no tail recursion possible + */ static int noinline recursive_loop(int remaining) { volatile char buf[REC_STACK_SIZE]; + volatile int ret; memset((void *)buf, remaining & 0xFF, sizeof(buf)); - pr_info("loop %d/%d ...\n", (int)buf[remaining % sizeof(buf)], - recur_count); if (!remaining) - return 0; + ret = 0; else - return recursive_loop(remaining - 1); + ret = recursive_loop((int)buf[remaining % sizeof(buf)] - 1); + memzero_explicit((void *)buf, sizeof(buf)); + return ret; } /* If the depth is negative, use the default, otherwise keep parameter. */ @@ -66,40 +70,65 @@ void __init lkdtm_bugs_init(int *recur_param) recur_count = *recur_param; } -void lkdtm_PANIC(void) +static void lkdtm_PANIC(void) { panic("dumptest"); } -void lkdtm_BUG(void) +static int panic_stop_irqoff_fn(void *arg) +{ + atomic_t *v = arg; + + /* + * As stop_machine() disables interrupts, all CPUs within this function + * have interrupts disabled and cannot take a regular IPI. 
+ * + * The last CPU which enters here will trigger a panic, and as all CPUs + * cannot take a regular IPI, we'll only be able to stop secondaries if + * smp_send_stop() or crash_smp_send_stop() uses an NMI. + */ + if (atomic_inc_return(v) == num_online_cpus()) + panic("panic stop irqoff test"); + + for (;;) + cpu_relax(); +} + +static void lkdtm_PANIC_STOP_IRQOFF(void) +{ + atomic_t v = ATOMIC_INIT(0); + stop_machine(panic_stop_irqoff_fn, &v, cpu_online_mask); +} + +static void lkdtm_BUG(void) { BUG(); } static int warn_counter; -void lkdtm_WARNING(void) +static void lkdtm_WARNING(void) { WARN_ON(++warn_counter); } -void lkdtm_WARNING_MESSAGE(void) +static void lkdtm_WARNING_MESSAGE(void) { WARN(1, "Warning message trigger count: %d\n", ++warn_counter); } -void lkdtm_EXCEPTION(void) +static void lkdtm_EXCEPTION(void) { *((volatile int *) 0) = 0; } -void lkdtm_LOOP(void) +static void lkdtm_LOOP(void) { for (;;) ; } -void lkdtm_EXHAUST_STACK(void) +static void lkdtm_EXHAUST_STACK(void) { pr_info("Calling function with %lu frame size to depth %d ...\n", REC_STACK_SIZE, recur_count); @@ -113,7 +142,7 @@ static noinline void __lkdtm_CORRUPT_STACK(void *stack) } /* This should trip the stack canary, not corrupt the return address. */ -noinline void lkdtm_CORRUPT_STACK(void) +static noinline void lkdtm_CORRUPT_STACK(void) { /* Use default char array length that triggers stack protection. */ char data[8] __aligned(sizeof(void *)); @@ -123,7 +152,7 @@ noinline void lkdtm_CORRUPT_STACK(void) } /* Same as above but will only get a canary with -fstack-protector-strong */ -noinline void lkdtm_CORRUPT_STACK_STRONG(void) +static noinline void lkdtm_CORRUPT_STACK_STRONG(void) { union { unsigned short shorts[4]; @@ -137,7 +166,7 @@ noinline void lkdtm_CORRUPT_STACK_STRONG(void) static pid_t stack_pid; static unsigned long stack_addr; -void lkdtm_REPORT_STACK(void) +static void lkdtm_REPORT_STACK(void) { volatile uintptr_t magic; pid_t pid = task_pid_nr(current); @@ -151,7 +180,84 @@ void lkdtm_REPORT_STACK(void) pr_info("Stack offset: %d\n", (int)(stack_addr - (uintptr_t)&magic)); } -void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void) +static pid_t stack_canary_pid; +static unsigned long stack_canary; +static unsigned long stack_canary_offset; + +static noinline void __lkdtm_REPORT_STACK_CANARY(void *stack) +{ + int i = 0; + pid_t pid = task_pid_nr(current); + unsigned long *canary = (unsigned long *)stack; + unsigned long current_offset = 0, init_offset = 0; + + /* Do our best to find the canary in a 16 word window ... */ + for (i = 1; i < 16; i++) { + canary = (unsigned long *)stack + i; +#ifdef CONFIG_STACKPROTECTOR + if (*canary == current->stack_canary) + current_offset = i; + if (*canary == init_task.stack_canary) + init_offset = i; +#endif + } + + if (current_offset == 0) { + /* + * If the canary doesn't match what's in the task_struct, + * we're either using a global canary or the stack frame + * layout changed. 
+ */ + if (init_offset != 0) { + pr_err("FAIL: global stack canary found at offset %ld (canary for pid %d matches init_task's)!\n", + init_offset, pid); + } else { + pr_warn("FAIL: did not correctly locate stack canary :(\n"); + pr_expected_config(CONFIG_STACKPROTECTOR); + } + + return; + } else if (init_offset != 0) { + pr_warn("WARNING: found both current and init_task canaries nearby?!\n"); + } + + canary = (unsigned long *)stack + current_offset; + if (stack_canary_pid == 0) { + stack_canary = *canary; + stack_canary_pid = pid; + stack_canary_offset = current_offset; + pr_info("Recorded stack canary for pid %d at offset %ld\n", + stack_canary_pid, stack_canary_offset); + } else if (pid == stack_canary_pid) { + pr_warn("ERROR: saw pid %d again -- please use a new pid\n", pid); + } else { + if (current_offset != stack_canary_offset) { + pr_warn("ERROR: canary offset changed from %ld to %ld!?\n", + stack_canary_offset, current_offset); + return; + } + + if (*canary == stack_canary) { + pr_warn("FAIL: canary identical for pid %d and pid %d at offset %ld!\n", + stack_canary_pid, pid, current_offset); + } else { + pr_info("ok: stack canaries differ between pid %d and pid %d at offset %ld.\n", + stack_canary_pid, pid, current_offset); + /* Reset the test. */ + stack_canary_pid = 0; + } + } +} + +static void lkdtm_REPORT_STACK_CANARY(void) +{ + /* Use default char array length that triggers stack protection. */ + char data[8] __aligned(sizeof(void *)) = { }; + + __lkdtm_REPORT_STACK_CANARY((void *)&data); +} + +static void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void) { static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5}; u32 *p; @@ -166,21 +272,50 @@ void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void) pr_err("XFAIL: arch has CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS\n"); } -void lkdtm_SOFTLOCKUP(void) +static void lkdtm_SOFTLOCKUP(void) { preempt_disable(); for (;;) cpu_relax(); } -void lkdtm_HARDLOCKUP(void) +static void lkdtm_HARDLOCKUP(void) { local_irq_disable(); for (;;) cpu_relax(); } -void lkdtm_SPINLOCKUP(void) +static void __lkdtm_SMP_CALL_LOCKUP(void *unused) +{ + for (;;) + cpu_relax(); +} + +static void lkdtm_SMP_CALL_LOCKUP(void) +{ + unsigned int cpu, target; + + cpus_read_lock(); + + cpu = get_cpu(); + target = cpumask_any_but(cpu_online_mask, cpu); + + if (target >= nr_cpu_ids) { + pr_err("FAIL: no other online CPUs\n"); + goto out_put_cpus; + } + + smp_call_function_single(target, __lkdtm_SMP_CALL_LOCKUP, NULL, 1); + + pr_err("FAIL: did not hang\n"); + +out_put_cpus: + put_cpu(); + cpus_read_unlock(); +} + +static void lkdtm_SPINLOCKUP(void) { /* Must be called twice to trigger. */ spin_lock(&lock_me_up); @@ -188,16 +323,17 @@ void lkdtm_SPINLOCKUP(void) __release(&lock_me_up); } -void lkdtm_HUNG_TASK(void) +static void __noreturn lkdtm_HUNG_TASK(void) { set_current_state(TASK_UNINTERRUPTIBLE); schedule(); + BUG(); } -volatile unsigned int huge = INT_MAX - 2; -volatile unsigned int ignored; +static volatile unsigned int huge = INT_MAX - 2; +static volatile unsigned int ignored; -void lkdtm_OVERFLOW_SIGNED(void) +static void lkdtm_OVERFLOW_SIGNED(void) { int value; @@ -212,7 +348,7 @@ void lkdtm_OVERFLOW_SIGNED(void) } -void lkdtm_OVERFLOW_UNSIGNED(void) +static void lkdtm_OVERFLOW_UNSIGNED(void) { unsigned int value; @@ -226,11 +362,11 @@ void lkdtm_OVERFLOW_UNSIGNED(void) ignored = value; } -/* Intentionally using old-style flex array definition of 1 byte. */ +/* Intentionally using unannotated flex array definition. 
*/ struct array_bounds_flex_array { int one; int two; - char data[1]; + char data[]; }; struct array_bounds { @@ -240,7 +376,7 @@ struct array_bounds { int three; }; -void lkdtm_ARRAY_BOUNDS(void) +static void lkdtm_ARRAY_BOUNDS(void) { struct array_bounds_flex_array *not_checked; struct array_bounds *checked; @@ -248,6 +384,11 @@ void lkdtm_ARRAY_BOUNDS(void) not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL); checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL); + if (!not_checked || !checked) { + kfree(not_checked); + kfree(checked); + return; + } pr_info("Array access within bounds ...\n"); /* For both, touch all bytes in the actual member size. */ @@ -257,7 +398,7 @@ void lkdtm_ARRAY_BOUNDS(void) * For the uninstrumented flex array member, also touch 1 byte * beyond to verify it is correctly uninstrumented. */ - for (i = 0; i < sizeof(not_checked->data) + 1; i++) + for (i = 0; i < 2; i++) not_checked->data[i] = 'A'; pr_info("Array access beyond bounds ...\n"); @@ -267,9 +408,53 @@ void lkdtm_ARRAY_BOUNDS(void) kfree(not_checked); kfree(checked); pr_err("FAIL: survived array bounds overflow!\n"); + if (IS_ENABLED(CONFIG_UBSAN_BOUNDS)) + pr_expected_config(CONFIG_UBSAN_TRAP); + else + pr_expected_config(CONFIG_UBSAN_BOUNDS); +} + +struct lkdtm_annotated { + unsigned long flags; + int count; + int array[] __counted_by(count); +}; + +static volatile int fam_count = 4; + +static void lkdtm_FAM_BOUNDS(void) +{ + struct lkdtm_annotated *inst; + + inst = kzalloc(struct_size(inst, array, fam_count + 1), GFP_KERNEL); + if (!inst) { + pr_err("FAIL: could not allocate test struct!\n"); + return; + } + + inst->count = fam_count; + pr_info("Array access within bounds ...\n"); + inst->array[1] = fam_count; + ignored = inst->array[1]; + + pr_info("Array access beyond bounds ...\n"); + inst->array[fam_count] = fam_count; + ignored = inst->array[fam_count]; + + kfree(inst); + + pr_err("FAIL: survived access of invalid flexible array member index!\n"); + + if (!IS_ENABLED(CONFIG_CC_HAS_COUNTED_BY)) + pr_warn("This is expected since this %s was built with a compiler that does not support __counted_by\n", + lkdtm_kernel_info); + else if (IS_ENABLED(CONFIG_UBSAN_BOUNDS)) + pr_expected_config(CONFIG_UBSAN_TRAP); + else + pr_expected_config(CONFIG_UBSAN_BOUNDS); } -void lkdtm_CORRUPT_LIST_ADD(void) +static void lkdtm_CORRUPT_LIST_ADD(void) { /* * Initially, an empty list via LIST_HEAD: @@ -305,11 +490,11 @@ void lkdtm_CORRUPT_LIST_ADD(void) pr_err("Overwrite did not happen, but no BUG?!\n"); else { pr_err("list_add() corruption not detected!\n"); - pr_expected_config(CONFIG_DEBUG_LIST); + pr_expected_config(CONFIG_LIST_HARDENED); } } -void lkdtm_CORRUPT_LIST_DEL(void) +static void lkdtm_CORRUPT_LIST_DEL(void) { LIST_HEAD(test_head); struct lkdtm_list item; @@ -332,12 +517,12 @@ void lkdtm_CORRUPT_LIST_DEL(void) pr_err("Overwrite did not happen, but no BUG?!\n"); else { pr_err("list_del() corruption not detected!\n"); - pr_expected_config(CONFIG_DEBUG_LIST); + pr_expected_config(CONFIG_LIST_HARDENED); } } /* Test that VMAP_STACK is actually allocating with a leading guard page */ -void lkdtm_STACK_GUARD_PAGE_LEADING(void) +static void lkdtm_STACK_GUARD_PAGE_LEADING(void) { const unsigned char *stack = task_stack_page(current); const unsigned char *ptr = stack - 1; @@ -351,7 +536,7 @@ void lkdtm_STACK_GUARD_PAGE_LEADING(void) } /* Test that VMAP_STACK is actually allocating with a trailing guard page */ -void lkdtm_STACK_GUARD_PAGE_TRAILING(void) +static void lkdtm_STACK_GUARD_PAGE_TRAILING(void) { 
const unsigned char *stack = task_stack_page(current); const unsigned char *ptr = stack + THREAD_SIZE; @@ -364,7 +549,7 @@ void lkdtm_STACK_GUARD_PAGE_TRAILING(void) pr_err("FAIL: accessed page after stack! (byte: %x)\n", byte); } -void lkdtm_UNSET_SMEP(void) +static void lkdtm_UNSET_SMEP(void) { #if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML) #define MOV_CR4_DEPTH 64 @@ -399,6 +584,7 @@ void lkdtm_UNSET_SMEP(void) * the cr4 writing instruction. */ insn = (unsigned char *)native_write_cr4; + OPTIMIZER_HIDE_VAR(insn); for (i = 0; i < MOV_CR4_DEPTH; i++) { /* mov %rdi, %cr4 */ if (insn[i] == 0x0f && insn[i+1] == 0x22 && insn[i+2] == 0xe7) @@ -430,7 +616,7 @@ void lkdtm_UNSET_SMEP(void) #endif } -void lkdtm_DOUBLE_FAULT(void) +static void lkdtm_DOUBLE_FAULT(void) { #if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML) /* @@ -478,7 +664,7 @@ static noinline void change_pac_parameters(void) } #endif -noinline void lkdtm_CORRUPT_PAC(void) +static noinline void lkdtm_CORRUPT_PAC(void) { #ifdef CONFIG_ARM64 #define CORRUPT_PAC_ITERATE 10 @@ -507,52 +693,39 @@ noinline void lkdtm_CORRUPT_PAC(void) #endif } -void lkdtm_FORTIFY_OBJECT(void) -{ - struct target { - char a[10]; - } target[2] = {}; - int result; - - /* - * Using volatile prevents the compiler from determining the value of - * 'size' at compile time. Without that, we would get a compile error - * rather than a runtime error. - */ - volatile int size = 11; - - pr_info("trying to read past the end of a struct\n"); - - result = memcmp(&target[0], &target[1], size); - - /* Print result to prevent the code from being eliminated */ - pr_err("FAIL: fortify did not catch an object overread!\n" - "\"%d\" was the memcmp result.\n", result); -} - -void lkdtm_FORTIFY_SUBOBJECT(void) -{ - struct target { - char a[10]; - char b[10]; - } target; - char *src; - - src = kmalloc(20, GFP_KERNEL); - strscpy(src, "over ten bytes", 20); - - pr_info("trying to strcpy past the end of a member of a struct\n"); - - /* - * strncpy(target.a, src, 20); will hit a compile error because the - * compiler knows at build time that target.a < 20 bytes. Use strcpy() - * to force a runtime error. 
- */ - strcpy(target.a, src); - - /* Use target.a to prevent the code from being eliminated */ - pr_err("FAIL: fortify did not catch an sub-object overrun!\n" - "\"%s\" was copied.\n", target.a); +static struct crashtype crashtypes[] = { + CRASHTYPE(PANIC), + CRASHTYPE(PANIC_STOP_IRQOFF), + CRASHTYPE(BUG), + CRASHTYPE(WARNING), + CRASHTYPE(WARNING_MESSAGE), + CRASHTYPE(EXCEPTION), + CRASHTYPE(LOOP), + CRASHTYPE(EXHAUST_STACK), + CRASHTYPE(CORRUPT_STACK), + CRASHTYPE(CORRUPT_STACK_STRONG), + CRASHTYPE(REPORT_STACK), + CRASHTYPE(REPORT_STACK_CANARY), + CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE), + CRASHTYPE(SOFTLOCKUP), + CRASHTYPE(HARDLOCKUP), + CRASHTYPE(SMP_CALL_LOCKUP), + CRASHTYPE(SPINLOCKUP), + CRASHTYPE(HUNG_TASK), + CRASHTYPE(OVERFLOW_SIGNED), + CRASHTYPE(OVERFLOW_UNSIGNED), + CRASHTYPE(ARRAY_BOUNDS), + CRASHTYPE(FAM_BOUNDS), + CRASHTYPE(CORRUPT_LIST_ADD), + CRASHTYPE(CORRUPT_LIST_DEL), + CRASHTYPE(STACK_GUARD_PAGE_LEADING), + CRASHTYPE(STACK_GUARD_PAGE_TRAILING), + CRASHTYPE(UNSET_SMEP), + CRASHTYPE(DOUBLE_FAULT), + CRASHTYPE(CORRUPT_PAC), +}; - kfree(src); -} +struct crashtype_category bugs_crashtypes = { + .crashtypes = crashtypes, + .len = ARRAY_SIZE(crashtypes), +}; diff --git a/drivers/misc/lkdtm/cfi.c b/drivers/misc/lkdtm/cfi.c index c9aeddef1044..c3971f7caa65 100644 --- a/drivers/misc/lkdtm/cfi.c +++ b/drivers/misc/lkdtm/cfi.c @@ -3,6 +3,7 @@ * This is for all the tests relating directly to Control Flow Integrity. */ #include "lkdtm.h" +#include <asm/page.h> static int called_count; @@ -19,25 +20,183 @@ static noinline int lkdtm_increment_int(int *counter) return *counter; } + +/* Don't allow the compiler to inline the calls. */ +static noinline void lkdtm_indirect_call(void (*func)(int *)) +{ + func(&called_count); +} + /* * This tries to call an indirect function with a mismatched prototype. */ -void lkdtm_CFI_FORWARD_PROTO(void) +static void lkdtm_CFI_FORWARD_PROTO(void) { /* * Matches lkdtm_increment_void()'s prototype, but not * lkdtm_increment_int()'s prototype. */ - void (*func)(int *); - pr_info("Calling matched prototype ...\n"); - func = lkdtm_increment_void; - func(&called_count); + lkdtm_indirect_call(lkdtm_increment_void); pr_info("Calling mismatched prototype ...\n"); - func = (void *)lkdtm_increment_int; - func(&called_count); + lkdtm_indirect_call((void *)lkdtm_increment_int); pr_err("FAIL: survived mismatched prototype function call!\n"); - pr_expected_config(CONFIG_CFI_CLANG); + pr_expected_config(CONFIG_CFI); +} + +/* + * This can stay local to LKDTM, as there should not be a production reason + * to disable PAC && SCS. + */ +#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL +# ifdef CONFIG_ARM64_BTI_KERNEL +# define __no_pac "branch-protection=bti" +# else +# ifdef CONFIG_CC_HAS_BRANCH_PROT_PAC_RET +# define __no_pac "branch-protection=none" +# else +# define __no_pac "sign-return-address=none" +# endif +# endif +# define __no_ret_protection __noscs __attribute__((__target__(__no_pac))) +#else +# define __no_ret_protection __noscs +#endif + +#define no_pac_addr(addr) \ + ((__force __typeof__(addr))((uintptr_t)(addr) | PAGE_OFFSET)) + +#ifdef CONFIG_RISCV +/* https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#frame-pointer-convention */ +#define FRAME_RA_OFFSET (-1) +#else +#define FRAME_RA_OFFSET 1 +#endif + +/* The ultimate ROP gadget. */ +static noinline __no_ret_protection +void set_return_addr_unchecked(unsigned long *expected, unsigned long *addr) +{ + /* Use of volatile is to make sure final write isn't seen as a dead store. 
*/ + unsigned long * volatile *ret_addr = + (unsigned long **)__builtin_frame_address(0) + FRAME_RA_OFFSET; + + /* Make sure we've found the right place on the stack before writing it. */ + if (no_pac_addr(*ret_addr) == expected) + *ret_addr = (addr); + else + /* Check architecture, stack layout, or compiler behavior... */ + pr_warn("Eek: return address mismatch! %px != %px\n", + *ret_addr, addr); +} + +static noinline +void set_return_addr(unsigned long *expected, unsigned long *addr) +{ + /* Use of volatile is to make sure final write isn't seen as a dead store. */ + unsigned long * volatile *ret_addr = + (unsigned long **)__builtin_frame_address(0) + FRAME_RA_OFFSET; + + /* Make sure we've found the right place on the stack before writing it. */ + if (no_pac_addr(*ret_addr) == expected) + *ret_addr = (addr); + else + /* Check architecture, stack layout, or compiler behavior... */ + pr_warn("Eek: return address mismatch! %px != %px\n", + *ret_addr, addr); +} + +static volatile int force_check; + +static void lkdtm_CFI_BACKWARD(void) +{ + /* Use calculated gotos to keep labels addressable. */ + void *labels[] = { NULL, &&normal, &&redirected, &&check_normal, &&check_redirected }; + + pr_info("Attempting unchecked stack return address redirection ...\n"); + + /* Always false */ + if (force_check) { + /* + * Prepare to call with NULLs to avoid parameters being treated as + * constants in -O2. + */ + set_return_addr_unchecked(NULL, NULL); + set_return_addr(NULL, NULL); + if (force_check) + goto *labels[1]; + if (force_check) + goto *labels[2]; + if (force_check) + goto *labels[3]; + if (force_check) + goto *labels[4]; + return; + } + + /* + * Use fallthrough switch case to keep basic block ordering between + * set_return_addr*() and the label after it. + */ + switch (force_check) { + case 0: + set_return_addr_unchecked(&&normal, &&redirected); + fallthrough; + case 1: +normal: + /* Always true */ + if (!force_check) { + pr_err("FAIL: stack return address manipulation failed!\n"); + /* If we can't redirect "normally", we can't test mitigations.
*/ + return; + } + break; + default: +redirected: + pr_info("ok: redirected stack return address.\n"); + break; + } + + pr_info("Attempting checked stack return address redirection ...\n"); + + switch (force_check) { + case 0: + set_return_addr(&&check_normal, &&check_redirected); + fallthrough; + case 1: +check_normal: + /* Always true */ + if (!force_check) { + pr_info("ok: control flow unchanged.\n"); + return; + } + +check_redirected: + pr_err("FAIL: stack return address was redirected!\n"); + break; + } + + if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) { + pr_expected_config(CONFIG_ARM64_PTR_AUTH_KERNEL); + return; + } + if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK)) { + pr_expected_config(CONFIG_SHADOW_CALL_STACK); + return; + } + pr_warn("This is probably expected, since this %s was built *without* %s=y nor %s=y\n", + lkdtm_kernel_info, + "CONFIG_ARM64_PTR_AUTH_KERNEL", "CONFIG_SHADOW_CALL_STACK"); } + +static struct crashtype crashtypes[] = { + CRASHTYPE(CFI_FORWARD_PROTO), + CRASHTYPE(CFI_BACKWARD), +}; + +struct crashtype_category cfi_crashtypes = { + .crashtypes = crashtypes, + .len = ARRAY_SIZE(crashtypes), +}; diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c index 9dda87c6b54a..5732fd59a227 100644 --- a/drivers/misc/lkdtm/core.c +++ b/drivers/misc/lkdtm/core.c @@ -26,7 +26,7 @@ #include <linux/init.h> #include <linux/slab.h> #include <linux/debugfs.h> -#include <linux/init.h> +#include <linux/utsname.h> #define DEFAULT_COUNT 10 @@ -79,115 +79,28 @@ static struct crashpoint crashpoints[] = { CRASHPOINT("INT_HARDWARE_ENTRY", "do_IRQ"), CRASHPOINT("INT_HW_IRQ_EN", "handle_irq_event"), CRASHPOINT("INT_TASKLET_ENTRY", "tasklet_action"), - CRASHPOINT("FS_DEVRW", "ll_rw_block"), + CRASHPOINT("FS_SUBMIT_BH", "submit_bh"), CRASHPOINT("MEM_SWAPOUT", "shrink_inactive_list"), CRASHPOINT("TIMERADD", "hrtimer_start"), - CRASHPOINT("SCSI_DISPATCH_CMD", "scsi_dispatch_cmd"), - CRASHPOINT("IDE_CORE_CP", "generic_ide_ioctl"), + CRASHPOINT("SCSI_QUEUE_RQ", "scsi_queue_rq"), #endif }; - -/* Crash types. */ -struct crashtype { - const char *name; - void (*func)(void); -}; - -#define CRASHTYPE(_name) \ - { \ - .name = __stringify(_name), \ - .func = lkdtm_ ## _name, \ - } - -/* Define the possible types of crashes that can be triggered. 
*/ -static const struct crashtype crashtypes[] = { - CRASHTYPE(PANIC), - CRASHTYPE(BUG), - CRASHTYPE(WARNING), - CRASHTYPE(WARNING_MESSAGE), - CRASHTYPE(EXCEPTION), - CRASHTYPE(LOOP), - CRASHTYPE(EXHAUST_STACK), - CRASHTYPE(CORRUPT_STACK), - CRASHTYPE(CORRUPT_STACK_STRONG), - CRASHTYPE(REPORT_STACK), - CRASHTYPE(CORRUPT_LIST_ADD), - CRASHTYPE(CORRUPT_LIST_DEL), - CRASHTYPE(STACK_GUARD_PAGE_LEADING), - CRASHTYPE(STACK_GUARD_PAGE_TRAILING), - CRASHTYPE(UNSET_SMEP), - CRASHTYPE(CORRUPT_PAC), - CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE), - CRASHTYPE(FORTIFY_OBJECT), - CRASHTYPE(FORTIFY_SUBOBJECT), - CRASHTYPE(SLAB_LINEAR_OVERFLOW), - CRASHTYPE(VMALLOC_LINEAR_OVERFLOW), - CRASHTYPE(WRITE_AFTER_FREE), - CRASHTYPE(READ_AFTER_FREE), - CRASHTYPE(WRITE_BUDDY_AFTER_FREE), - CRASHTYPE(READ_BUDDY_AFTER_FREE), - CRASHTYPE(SLAB_INIT_ON_ALLOC), - CRASHTYPE(BUDDY_INIT_ON_ALLOC), - CRASHTYPE(SLAB_FREE_DOUBLE), - CRASHTYPE(SLAB_FREE_CROSS), - CRASHTYPE(SLAB_FREE_PAGE), - CRASHTYPE(SOFTLOCKUP), - CRASHTYPE(HARDLOCKUP), - CRASHTYPE(SPINLOCKUP), - CRASHTYPE(HUNG_TASK), - CRASHTYPE(OVERFLOW_SIGNED), - CRASHTYPE(OVERFLOW_UNSIGNED), - CRASHTYPE(ARRAY_BOUNDS), - CRASHTYPE(EXEC_DATA), - CRASHTYPE(EXEC_STACK), - CRASHTYPE(EXEC_KMALLOC), - CRASHTYPE(EXEC_VMALLOC), - CRASHTYPE(EXEC_RODATA), - CRASHTYPE(EXEC_USERSPACE), - CRASHTYPE(EXEC_NULL), - CRASHTYPE(ACCESS_USERSPACE), - CRASHTYPE(ACCESS_NULL), - CRASHTYPE(WRITE_RO), - CRASHTYPE(WRITE_RO_AFTER_INIT), - CRASHTYPE(WRITE_KERN), - CRASHTYPE(REFCOUNT_INC_OVERFLOW), - CRASHTYPE(REFCOUNT_ADD_OVERFLOW), - CRASHTYPE(REFCOUNT_INC_NOT_ZERO_OVERFLOW), - CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_OVERFLOW), - CRASHTYPE(REFCOUNT_DEC_ZERO), - CRASHTYPE(REFCOUNT_DEC_NEGATIVE), - CRASHTYPE(REFCOUNT_DEC_AND_TEST_NEGATIVE), - CRASHTYPE(REFCOUNT_SUB_AND_TEST_NEGATIVE), - CRASHTYPE(REFCOUNT_INC_ZERO), - CRASHTYPE(REFCOUNT_ADD_ZERO), - CRASHTYPE(REFCOUNT_INC_SATURATED), - CRASHTYPE(REFCOUNT_DEC_SATURATED), - CRASHTYPE(REFCOUNT_ADD_SATURATED), - CRASHTYPE(REFCOUNT_INC_NOT_ZERO_SATURATED), - CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_SATURATED), - CRASHTYPE(REFCOUNT_DEC_AND_TEST_SATURATED), - CRASHTYPE(REFCOUNT_SUB_AND_TEST_SATURATED), - CRASHTYPE(REFCOUNT_TIMING), - CRASHTYPE(ATOMIC_TIMING), - CRASHTYPE(USERCOPY_HEAP_SIZE_TO), - CRASHTYPE(USERCOPY_HEAP_SIZE_FROM), - CRASHTYPE(USERCOPY_HEAP_WHITELIST_TO), - CRASHTYPE(USERCOPY_HEAP_WHITELIST_FROM), - CRASHTYPE(USERCOPY_STACK_FRAME_TO), - CRASHTYPE(USERCOPY_STACK_FRAME_FROM), - CRASHTYPE(USERCOPY_STACK_BEYOND), - CRASHTYPE(USERCOPY_KERNEL), - CRASHTYPE(STACKLEAK_ERASING), - CRASHTYPE(CFI_FORWARD_PROTO), - CRASHTYPE(FORTIFIED_STRSCPY), - CRASHTYPE(DOUBLE_FAULT), -#ifdef CONFIG_PPC_BOOK3S_64 - CRASHTYPE(PPC_SLB_MULTIHIT), +/* List of possible types for crashes that can be triggered. */ +static const struct crashtype_category *crashtype_categories[] = { + &bugs_crashtypes, + &heap_crashtypes, + &perms_crashtypes, + &refcount_crashtypes, + &usercopy_crashtypes, + &stackleak_crashtypes, + &cfi_crashtypes, + &fortify_crashtypes, +#ifdef CONFIG_PPC_64S_HASH_MMU + &powerpc_crashtypes, #endif }; - /* Global kprobe entry and crashtype. */ static struct kprobe *lkdtm_kprobe; static struct crashpoint *lkdtm_crashpoint; @@ -212,15 +125,26 @@ module_param(cpoint_count, int, 0644); MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\ "crash point is to be hit to trigger action"); +/* + * For test debug reporting when CI systems provide terse summaries. 
+ * TODO: Remove this once reasonable reporting exists in most CI systems: + * https://lore.kernel.org/lkml/CAHk-=wiFvfkoFixTapvvyPMN9pq5G-+Dys2eSyBa1vzDGAO5+A@mail.gmail.com + */ +char *lkdtm_kernel_info; /* Return the crashtype number or NULL if the name is invalid */ static const struct crashtype *find_crashtype(const char *name) { - int i; + int cat, idx; + + for (cat = 0; cat < ARRAY_SIZE(crashtype_categories); cat++) { + for (idx = 0; idx < crashtype_categories[cat]->len; idx++) { + struct crashtype *crashtype; - for (i = 0; i < ARRAY_SIZE(crashtypes); i++) { - if (!strcmp(name, crashtypes[i].name)) - return &crashtypes[i]; + crashtype = &crashtype_categories[cat]->crashtypes[idx]; + if (!strcmp(name, crashtype->name)) + return crashtype; + } } return NULL; @@ -229,12 +153,17 @@ static const struct crashtype *find_crashtype(const char *name) /* * This is forced noinline just so it distinctly shows up in the stackdump * which makes validation of expected lkdtm crashes easier. + * + * NOTE: having a valid return value helps prevent the compiler from doing + * tail call optimizations and taking this out of the stack trace. */ -static noinline void lkdtm_do_action(const struct crashtype *crashtype) +static noinline int lkdtm_do_action(const struct crashtype *crashtype) { if (WARN_ON(!crashtype || !crashtype->func)) - return; + return -EINVAL; crashtype->func(); + + return 0; } static int lkdtm_register_cpoint(struct crashpoint *crashpoint, @@ -243,10 +172,8 @@ static int lkdtm_register_cpoint(struct crashpoint *crashpoint, int ret; /* If this doesn't have a symbol, just call immediately. */ - if (!crashpoint->kprobe.symbol_name) { - lkdtm_do_action(crashtype); - return 0; - } + if (!crashpoint->kprobe.symbol_name) + return lkdtm_do_action(crashtype); if (lkdtm_kprobe != NULL) unregister_kprobe(lkdtm_kprobe); @@ -292,7 +219,7 @@ static int lkdtm_kprobe_handler(struct kprobe *kp, struct pt_regs *regs) spin_unlock_irqrestore(&crash_count_lock, flags); if (do_it) - lkdtm_do_action(lkdtm_crashtype); + return lkdtm_do_action(lkdtm_crashtype); return 0; } @@ -340,17 +267,24 @@ static ssize_t lkdtm_debugfs_entry(struct file *f, static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf, size_t count, loff_t *off) { + int n, cat, idx; + ssize_t out; char *buf; - int i, n, out; buf = (char *)__get_free_page(GFP_KERNEL); if (buf == NULL) return -ENOMEM; n = scnprintf(buf, PAGE_SIZE, "Available crash types:\n"); - for (i = 0; i < ARRAY_SIZE(crashtypes); i++) { - n += scnprintf(buf + n, PAGE_SIZE - n, "%s\n", - crashtypes[i].name); + + for (cat = 0; cat < ARRAY_SIZE(crashtype_categories); cat++) { + for (idx = 0; idx < crashtype_categories[cat]->len; idx++) { + struct crashtype *crashtype; + + crashtype = &crashtype_categories[cat]->crashtypes[idx]; + n += scnprintf(buf + n, PAGE_SIZE - n, "%s\n", + crashtype->name); + } } buf[n] = '\0'; @@ -372,6 +306,7 @@ static ssize_t direct_entry(struct file *f, const char __user *user_buf, { const struct crashtype *crashtype; char *buf; + int err; if (count >= PAGE_SIZE) return -EINVAL; @@ -395,9 +330,11 @@ static ssize_t direct_entry(struct file *f, const char __user *user_buf, return -EINVAL; pr_info("Performing direct entry %s\n", crashtype->name); - lkdtm_do_action(crashtype); + err = lkdtm_do_action(crashtype); *off += count; + if (err) + return err; return count; } @@ -492,6 +429,11 @@ static int __init lkdtm_module_init(void) crash_count = cpoint_count; #endif + /* Common initialization. 
*/ + lkdtm_kernel_info = kasprintf(GFP_KERNEL, "kernel (%s %s)", + init_uts_ns.name.release, + init_uts_ns.name.machine); + /* Handle test-specific initialization. */ lkdtm_bugs_init(&recur_count); lkdtm_perms_init(); @@ -540,6 +482,8 @@ static void __exit lkdtm_module_exit(void) if (lkdtm_kprobe != NULL) unregister_kprobe(lkdtm_kprobe); + kfree(lkdtm_kernel_info); + pr_info("Crash point unregistered\n"); } diff --git a/drivers/misc/lkdtm/fortify.c b/drivers/misc/lkdtm/fortify.c index 0f51d31b57ca..00ed2147113e 100644 --- a/drivers/misc/lkdtm/fortify.c +++ b/drivers/misc/lkdtm/fortify.c @@ -8,13 +8,140 @@ #include <linux/string.h> #include <linux/slab.h> +static volatile int fortify_scratch_space; + +static void lkdtm_FORTIFY_STR_OBJECT(void) +{ + struct target { + char a[10]; + int foo; + } target[3] = {}; + /* + * Using volatile prevents the compiler from determining the value of + * 'size' at compile time. Without that, we would get a compile error + * rather than a runtime error. + */ + volatile int size = 20; + + pr_info("trying to strncpy() past the end of a struct\n"); + + strncpy(target[0].a, target[1].a, size); + + /* Store result to global to prevent the code from being eliminated */ + fortify_scratch_space = target[0].a[3]; + + pr_err("FAIL: fortify did not block a strncpy() object write overflow!\n"); + pr_expected_config(CONFIG_FORTIFY_SOURCE); +} + +static void lkdtm_FORTIFY_STR_MEMBER(void) +{ + struct target { + char a[10]; + char b[10]; + } target; + volatile int size = 20; + char *src; + + src = kmalloc(size, GFP_KERNEL); + if (!src) + return; + + strscpy(src, "over ten bytes", size); + size = strlen(src) + 1; + + pr_info("trying to strncpy() past the end of a struct member...\n"); + + /* + * strncpy(target.a, src, 20); will hit a compile error because the + * compiler knows at build time that target.a < 20 bytes. Use a + * volatile to force a runtime error. + */ + strncpy(target.a, src, size); + + /* Store result to global to prevent the code from being eliminated */ + fortify_scratch_space = target.a[3]; + + pr_err("FAIL: fortify did not block a strncpy() struct member write overflow!\n"); + pr_expected_config(CONFIG_FORTIFY_SOURCE); + + kfree(src); +} + +static void lkdtm_FORTIFY_MEM_OBJECT(void) +{ + int before[10]; + struct target { + char a[10]; + int foo; + } target = {}; + int after[10]; + /* + * Using volatile prevents the compiler from determining the value of + * 'size' at compile time. Without that, we would get a compile error + * rather than a runtime error.
*/ + volatile int size = 20; + + memset(before, 0, sizeof(before)); + memset(after, 0, sizeof(after)); + fortify_scratch_space = before[5]; + fortify_scratch_space = after[5]; + + pr_info("trying to memcpy() past the end of a struct\n"); + + pr_info("0: %zu\n", __builtin_object_size(&target, 0)); + pr_info("1: %zu\n", __builtin_object_size(&target, 1)); + pr_info("s: %d\n", size); + memcpy(&target, &before, size); + + /* Store result to global to prevent the code from being eliminated */ + fortify_scratch_space = target.a[3]; + + pr_err("FAIL: fortify did not block a memcpy() object write overflow!\n"); + pr_expected_config(CONFIG_FORTIFY_SOURCE); +} + +static void lkdtm_FORTIFY_MEM_MEMBER(void) +{ + struct target { + char a[10]; + char b[10]; + } target; + volatile int size = 20; + char *src; + + src = kmalloc(size, GFP_KERNEL); + if (!src) + return; + + strscpy(src, "over ten bytes", size); + size = strlen(src) + 1; + + pr_info("trying to memcpy() past the end of a struct member...\n"); + + /* + * memcpy(target.a, src, 20); will hit a compile error because the + * compiler knows at build time that target.a < 20 bytes. Use a + * volatile to force a runtime error. + */ + memcpy(target.a, src, size); + + /* Store result to global to prevent the code from being eliminated */ + fortify_scratch_space = target.a[3]; + + pr_err("FAIL: fortify did not block a memcpy() struct member write overflow!\n"); + pr_expected_config(CONFIG_FORTIFY_SOURCE); + + kfree(src); +} /* * Calls fortified strscpy to test that it returns the same result as vanilla * strscpy and generate a panic because there is a write overflow (i.e. src * length is greater than dst length). */ -void lkdtm_FORTIFIED_STRSCPY(void) +static void lkdtm_FORTIFY_STRSCPY(void) { char *src; char dst[5]; @@ -81,3 +208,16 @@ kfree(src); } + +static struct crashtype crashtypes[] = { + CRASHTYPE(FORTIFY_STR_OBJECT), + CRASHTYPE(FORTIFY_STR_MEMBER), + CRASHTYPE(FORTIFY_MEM_OBJECT), + CRASHTYPE(FORTIFY_MEM_MEMBER), + CRASHTYPE(FORTIFY_STRSCPY), +}; + +struct crashtype_category fortify_crashtypes = { + .crashtypes = crashtypes, + .len = ARRAY_SIZE(crashtypes), +}; diff --git a/drivers/misc/lkdtm/heap.c b/drivers/misc/lkdtm/heap.c index 3d9aae5821a0..c1a05b935894 100644 --- a/drivers/misc/lkdtm/heap.c +++ b/drivers/misc/lkdtm/heap.c @@ -4,6 +4,7 @@ * page allocation and slab allocations. */ #include "lkdtm.h" +#include <linux/kfence.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/sched.h> @@ -13,18 +14,29 @@ static struct kmem_cache *a_cache; static struct kmem_cache *b_cache; /* + * Using volatile here means the compiler cannot ever make assumptions + * about this value. This means compile-time length checks involving + * this variable cannot be performed; only run-time checks. + */ +static volatile int __offset = 1; + +/* * If there aren't guard pages, it's likely that a consecutive allocation will * let us overflow into the second allocation without overwriting something real. + * + * This should always be caught because there is an unconditional unmapped + * page after vmap allocations.
*/ -void lkdtm_VMALLOC_LINEAR_OVERFLOW(void) +static void lkdtm_VMALLOC_LINEAR_OVERFLOW(void) { char *one, *two; one = vzalloc(PAGE_SIZE); + OPTIMIZER_HIDE_VAR(one); two = vzalloc(PAGE_SIZE); pr_info("Attempting vmalloc linear overflow ...\n"); - memset(one, 0xAA, PAGE_SIZE + 1); + memset(one, 0xAA, PAGE_SIZE + __offset); vfree(two); vfree(one); @@ -34,8 +46,11 @@ * This tries to stay within the next largest power-of-2 kmalloc cache * to avoid actually overwriting anything important if it's not detected * correctly. + * + * This should get caught by either memory tagging, KASan, or by using + * CONFIG_SLUB_DEBUG=y and slab_debug=ZF (or CONFIG_SLUB_DEBUG_ON=y). */ -void lkdtm_SLAB_LINEAR_OVERFLOW(void) +static void lkdtm_SLAB_LINEAR_OVERFLOW(void) { size_t len = 1020; u32 *data = kmalloc(len, GFP_KERNEL); @@ -43,11 +58,12 @@ return; pr_info("Attempting slab linear overflow ...\n"); + OPTIMIZER_HIDE_VAR(data); data[1024 / sizeof(u32)] = 0x12345678; kfree(data); } -void lkdtm_WRITE_AFTER_FREE(void) +static void lkdtm_WRITE_AFTER_FREE(void) { int *base, *again; size_t len = 1024; @@ -73,7 +89,7 @@ pr_info("Hmm, didn't get the same memory range.\n"); } -void lkdtm_READ_AFTER_FREE(void) +static void lkdtm_READ_AFTER_FREE(void) { int *base, *val, saw; size_t len = 1024; @@ -117,7 +133,65 @@ kfree(val); } +static void lkdtm_KFENCE_READ_AFTER_FREE(void) +{ + int *base, val, saw; + unsigned long timeout, resched_after; + size_t len = 1024; + /* + * The slub allocator will use either the first word or + * the middle of the allocation to store the free pointer, + * depending on configurations. Store in the second word to + * avoid running into the freelist. + */ + size_t offset = sizeof(*base); + + /* + * 100x the sample interval should be more than enough to ensure we get + * a KFENCE allocation eventually. + */ + timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval); + /* + * Especially for non-preemption kernels, ensure the allocation-gate + * timer can catch up: after @resched_after, every failed allocation + * attempt yields, to ensure the allocation-gate timer is scheduled. + */ + resched_after = jiffies + msecs_to_jiffies(kfence_sample_interval); + do { + base = kmalloc(len, GFP_KERNEL); + if (!base) { + pr_err("FAIL: Unable to allocate kfence memory!\n"); + return; + } + + if (is_kfence_address(base)) { + val = 0x12345678; + base[offset] = val; + pr_info("Value in memory before free: %x\n", base[offset]); + + kfree(base); + + pr_info("Attempting bad read from freed memory\n"); + saw = base[offset]; + if (saw != val) { + /* Good! Poisoning happened, so declare a win.
*/ + pr_info("Memory correctly poisoned (%x)\n", saw); + } else { + pr_err("FAIL: Memory was not poisoned!\n"); + pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free"); + } + return; + } + + kfree(base); + if (time_after(jiffies, resched_after)) + cond_resched(); + } while (time_before(jiffies, timeout)); + + pr_err("FAIL: kfence memory never allocated!\n"); +} + +static void lkdtm_WRITE_BUDDY_AFTER_FREE(void) { unsigned long p = __get_free_page(GFP_KERNEL); if (!p) { @@ -137,7 +211,7 @@ void lkdtm_WRITE_BUDDY_AFTER_FREE(void) schedule(); } -void lkdtm_READ_BUDDY_AFTER_FREE(void) +static void lkdtm_READ_BUDDY_AFTER_FREE(void) { unsigned long p = __get_free_page(GFP_KERNEL); int saw, *val; @@ -174,7 +248,7 @@ void lkdtm_READ_BUDDY_AFTER_FREE(void) kfree(val); } -void lkdtm_SLAB_INIT_ON_ALLOC(void) +static void lkdtm_SLAB_INIT_ON_ALLOC(void) { u8 *first; u8 *val; @@ -206,7 +280,7 @@ void lkdtm_SLAB_INIT_ON_ALLOC(void) kfree(val); } -void lkdtm_BUDDY_INIT_ON_ALLOC(void) +static void lkdtm_BUDDY_INIT_ON_ALLOC(void) { u8 *first; u8 *val; @@ -239,7 +313,7 @@ void lkdtm_BUDDY_INIT_ON_ALLOC(void) free_page((unsigned long)val); } -void lkdtm_SLAB_FREE_DOUBLE(void) +static void lkdtm_SLAB_FREE_DOUBLE(void) { int *val; @@ -256,7 +330,7 @@ void lkdtm_SLAB_FREE_DOUBLE(void) kmem_cache_free(double_free_cache, val); } -void lkdtm_SLAB_FREE_CROSS(void) +static void lkdtm_SLAB_FREE_CROSS(void) { int *val; @@ -272,7 +346,7 @@ void lkdtm_SLAB_FREE_CROSS(void) kmem_cache_free(b_cache, val); } -void lkdtm_SLAB_FREE_PAGE(void) +static void lkdtm_SLAB_FREE_PAGE(void) { unsigned long p = __get_free_page(GFP_KERNEL); @@ -281,23 +355,12 @@ void lkdtm_SLAB_FREE_PAGE(void) free_page(p); } -/* - * We have constructors to keep the caches distinctly separated without - * needing to boot with "slab_nomerge". - */ -static void ctor_double_free(void *region) -{ } -static void ctor_a(void *region) -{ } -static void ctor_b(void *region) -{ } - void __init lkdtm_heap_init(void) { double_free_cache = kmem_cache_create("lkdtm-heap-double_free", - 64, 0, 0, ctor_double_free); - a_cache = kmem_cache_create("lkdtm-heap-a", 64, 0, 0, ctor_a); - b_cache = kmem_cache_create("lkdtm-heap-b", 64, 0, 0, ctor_b); + 64, 0, SLAB_NO_MERGE, NULL); + a_cache = kmem_cache_create("lkdtm-heap-a", 64, 0, SLAB_NO_MERGE, NULL); + b_cache = kmem_cache_create("lkdtm-heap-b", 64, 0, SLAB_NO_MERGE, NULL); } void __exit lkdtm_heap_exit(void) @@ -306,3 +369,23 @@ void __exit lkdtm_heap_exit(void) kmem_cache_destroy(a_cache); kmem_cache_destroy(b_cache); } + +static struct crashtype crashtypes[] = { + CRASHTYPE(SLAB_LINEAR_OVERFLOW), + CRASHTYPE(VMALLOC_LINEAR_OVERFLOW), + CRASHTYPE(WRITE_AFTER_FREE), + CRASHTYPE(READ_AFTER_FREE), + CRASHTYPE(KFENCE_READ_AFTER_FREE), + CRASHTYPE(WRITE_BUDDY_AFTER_FREE), + CRASHTYPE(READ_BUDDY_AFTER_FREE), + CRASHTYPE(SLAB_INIT_ON_ALLOC), + CRASHTYPE(BUDDY_INIT_ON_ALLOC), + CRASHTYPE(SLAB_FREE_DOUBLE), + CRASHTYPE(SLAB_FREE_CROSS), + CRASHTYPE(SLAB_FREE_PAGE), +}; + +struct crashtype_category heap_crashtypes = { + .crashtypes = crashtypes, + .len = ARRAY_SIZE(crashtypes), +}; diff --git a/drivers/misc/lkdtm/kstack_erase.c b/drivers/misc/lkdtm/kstack_erase.c new file mode 100644 index 000000000000..4fd9b0bfb874 --- /dev/null +++ b/drivers/misc/lkdtm/kstack_erase.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This code tests that the current task stack is properly erased (filled + * with KSTACK_ERASE_POISON). 
+ * + * Authors: + * Alexander Popov <alex.popov@linux.com> + * Tycho Andersen <tycho@tycho.ws> + */ + +#include "lkdtm.h" +#include <linux/kstack_erase.h> + +#if defined(CONFIG_KSTACK_ERASE) +/* + * Check that stackleak tracks the lowest stack pointer and erases the stack + * below this as expected. + * + * To prevent the lowest stack pointer changing during the test, IRQs are + * masked and instrumentation of this function is disabled. We assume that the + * compiler will create a fixed-size stack frame for this function. + * + * Any non-inlined function may make further use of the stack, altering the + * lowest stack pointer and/or clobbering poison values. To avoid spurious + * failures we must avoid printing until the end of the test or have already + * encountered a failure condition. + */ +static void noinstr check_stackleak_irqoff(void) +{ + const unsigned long task_stack_base = (unsigned long)task_stack_page(current); + const unsigned long task_stack_low = stackleak_task_low_bound(current); + const unsigned long task_stack_high = stackleak_task_high_bound(current); + const unsigned long current_sp = current_stack_pointer; + const unsigned long lowest_sp = current->lowest_stack; + unsigned long untracked_high; + unsigned long poison_high, poison_low; + bool test_failed = false; + + /* + * Check that the current and lowest recorded stack pointer values fall + * within the expected task stack boundaries. These tests should never + * fail unless the boundaries are incorrect or we're clobbering the + * STACK_END_MAGIC, and in either case something is seriously wrong. + */ + if (current_sp < task_stack_low || current_sp >= task_stack_high) { + instrumentation_begin(); + pr_err("FAIL: current_stack_pointer (0x%lx) outside of task stack bounds [0x%lx..0x%lx]\n", + current_sp, task_stack_low, task_stack_high - 1); + test_failed = true; + goto out; + } + if (lowest_sp < task_stack_low || lowest_sp >= task_stack_high) { + instrumentation_begin(); + pr_err("FAIL: current->lowest_stack (0x%lx) outside of task stack bounds [0x%lx..0x%lx]\n", + lowest_sp, task_stack_low, task_stack_high - 1); + test_failed = true; + goto out; + } + + /* + * Depending on what has run prior to this test, the lowest recorded + * stack pointer could be above or below the current stack pointer. + * Start from the lowest of the two. + * + * Poison values are naturally-aligned unsigned longs. As the current + * stack pointer might not be sufficiently aligned, we must align + * downwards to find the lowest known stack pointer value. This is the + * high boundary for a portion of the stack which may have been used + * without being tracked, and has to be scanned for poison. + */ + untracked_high = min(current_sp, lowest_sp); + untracked_high = ALIGN_DOWN(untracked_high, sizeof(unsigned long)); + + /* + * Find the top of the poison in the same way as the erasing code. + */ + poison_high = stackleak_find_top_of_poison(task_stack_low, untracked_high); + + /* + * Check whether the poisoned portion of the stack (if any) consists + * entirely of poison. This verifies the entries that + * stackleak_find_top_of_poison() should have checked.
+ */ + poison_low = poison_high; + while (poison_low > task_stack_low) { + poison_low -= sizeof(unsigned long); + + if (*(unsigned long *)poison_low == KSTACK_ERASE_POISON) + continue; + + instrumentation_begin(); + pr_err("FAIL: non-poison value %lu bytes below poison boundary: 0x%lx\n", + poison_high - poison_low, *(unsigned long *)poison_low); + test_failed = true; + goto out; + } + + instrumentation_begin(); + pr_info("kstack erase stack usage:\n" + " high offset: %lu bytes\n" + " current: %lu bytes\n" + " lowest: %lu bytes\n" + " tracked: %lu bytes\n" + " untracked: %lu bytes\n" + " poisoned: %lu bytes\n" + " low offset: %lu bytes\n", + task_stack_base + THREAD_SIZE - task_stack_high, + task_stack_high - current_sp, + task_stack_high - lowest_sp, + task_stack_high - untracked_high, + untracked_high - poison_high, + poison_high - task_stack_low, + task_stack_low - task_stack_base); + +out: + if (test_failed) { + pr_err("FAIL: the thread stack is NOT properly erased!\n"); + } else { + pr_info("OK: the rest of the thread stack is properly erased\n"); + } + instrumentation_end(); +} + +static void lkdtm_KSTACK_ERASE(void) +{ + unsigned long flags; + + local_irq_save(flags); + check_stackleak_irqoff(); + local_irq_restore(flags); +} +#else /* defined(CONFIG_KSTACK_ERASE) */ +static void lkdtm_KSTACK_ERASE(void) +{ + if (IS_ENABLED(CONFIG_HAVE_ARCH_KSTACK_ERASE)) { + pr_err("XFAIL: stackleak is not enabled (CONFIG_KSTACK_ERASE=n)\n"); + } else { + pr_err("XFAIL: stackleak is not supported on this arch (HAVE_ARCH_KSTACK_ERASE=n)\n"); + } +} +#endif /* defined(CONFIG_KSTACK_ERASE) */ + +static struct crashtype crashtypes[] = { + CRASHTYPE(KSTACK_ERASE), +}; + +struct crashtype_category stackleak_crashtypes = { + .crashtypes = crashtypes, + .len = ARRAY_SIZE(crashtypes), +}; diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h index 6a30b60519f3..015e0484026b 100644 --- a/drivers/misc/lkdtm/lkdtm.h +++ b/drivers/misc/lkdtm/lkdtm.h @@ -6,153 +6,95 @@ #include <linux/kernel.h> +extern char *lkdtm_kernel_info; + #define pr_expected_config(kconfig) \ -{ \ +do { \ if (IS_ENABLED(kconfig)) \ - pr_err("Unexpected! This kernel was built with " #kconfig "=y\n"); \ + pr_err("Unexpected! This %s was built with " #kconfig "=y\n", \ + lkdtm_kernel_info); \ else \ - pr_warn("This is probably expected, since this kernel was built *without* " #kconfig "=y\n"); \ -} + pr_warn("This is probably expected, since this %s was built *without* " #kconfig "=y\n", \ + lkdtm_kernel_info); \ +} while (0) #ifndef MODULE int lkdtm_check_bool_cmdline(const char *param); #define pr_expected_config_param(kconfig, param) \ -{ \ +do { \ if (IS_ENABLED(kconfig)) { \ switch (lkdtm_check_bool_cmdline(param)) { \ case 0: \ - pr_warn("This is probably expected, since this kernel was built with " #kconfig "=y but booted with '" param "=N'\n"); \ + pr_warn("This is probably expected, since this %s was built with " #kconfig "=y but booted with '" param "=N'\n", \ + lkdtm_kernel_info); \ break; \ case 1: \ - pr_err("Unexpected! This kernel was built with " #kconfig "=y and booted with '" param "=Y'\n"); \ + pr_err("Unexpected! This %s was built with " #kconfig "=y and booted with '" param "=Y'\n", \ + lkdtm_kernel_info); \ break; \ default: \ - pr_err("Unexpected! This kernel was built with " #kconfig "=y (and booted without '" param "' specified)\n"); \ + pr_err("Unexpected! 
This %s was built with " #kconfig "=y (and booted without '" param "' specified)\n", \ + lkdtm_kernel_info); \ } \ } else { \ switch (lkdtm_check_bool_cmdline(param)) { \ case 0: \ - pr_warn("This is probably expected, as kernel was built *without* " #kconfig "=y and booted with '" param "=N'\n"); \ + pr_warn("This is probably expected, as this %s was built *without* " #kconfig "=y and booted with '" param "=N'\n", \ + lkdtm_kernel_info); \ break; \ case 1: \ - pr_err("Unexpected! This kernel was built *without* " #kconfig "=y but booted with '" param "=Y'\n"); \ + pr_err("Unexpected! This %s was built *without* " #kconfig "=y but booted with '" param "=Y'\n", \ + lkdtm_kernel_info); \ break; \ default: \ - pr_err("This is probably expected, since this kernel was built *without* " #kconfig "=y (and booted without '" param "' specified)\n"); \ + pr_err("This is probably expected, since this %s was built *without* " #kconfig "=y (and booted without '" param "' specified)\n", \ + lkdtm_kernel_info); \ break; \ } \ } \ -} +} while (0) #else #define pr_expected_config_param(kconfig, param) pr_expected_config(kconfig) #endif -/* bugs.c */ -void __init lkdtm_bugs_init(int *recur_param); -void lkdtm_PANIC(void); -void lkdtm_BUG(void); -void lkdtm_WARNING(void); -void lkdtm_WARNING_MESSAGE(void); -void lkdtm_EXCEPTION(void); -void lkdtm_LOOP(void); -void lkdtm_EXHAUST_STACK(void); -void lkdtm_CORRUPT_STACK(void); -void lkdtm_CORRUPT_STACK_STRONG(void); -void lkdtm_REPORT_STACK(void); -void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void); -void lkdtm_SOFTLOCKUP(void); -void lkdtm_HARDLOCKUP(void); -void lkdtm_SPINLOCKUP(void); -void lkdtm_HUNG_TASK(void); -void lkdtm_OVERFLOW_SIGNED(void); -void lkdtm_OVERFLOW_UNSIGNED(void); -void lkdtm_ARRAY_BOUNDS(void); -void lkdtm_CORRUPT_LIST_ADD(void); -void lkdtm_CORRUPT_LIST_DEL(void); -void lkdtm_STACK_GUARD_PAGE_LEADING(void); -void lkdtm_STACK_GUARD_PAGE_TRAILING(void); -void lkdtm_UNSET_SMEP(void); -void lkdtm_DOUBLE_FAULT(void); -void lkdtm_CORRUPT_PAC(void); -void lkdtm_FORTIFY_OBJECT(void); -void lkdtm_FORTIFY_SUBOBJECT(void); - -/* heap.c */ -void __init lkdtm_heap_init(void); -void __exit lkdtm_heap_exit(void); -void lkdtm_VMALLOC_LINEAR_OVERFLOW(void); -void lkdtm_SLAB_LINEAR_OVERFLOW(void); -void lkdtm_WRITE_AFTER_FREE(void); -void lkdtm_READ_AFTER_FREE(void); -void lkdtm_WRITE_BUDDY_AFTER_FREE(void); -void lkdtm_READ_BUDDY_AFTER_FREE(void); -void lkdtm_SLAB_INIT_ON_ALLOC(void); -void lkdtm_BUDDY_INIT_ON_ALLOC(void); -void lkdtm_SLAB_FREE_DOUBLE(void); -void lkdtm_SLAB_FREE_CROSS(void); -void lkdtm_SLAB_FREE_PAGE(void); +/* Crash types. 
*/ +struct crashtype { + const char *name; + void (*func)(void); +}; -/* perms.c */ -void __init lkdtm_perms_init(void); -void lkdtm_WRITE_RO(void); -void lkdtm_WRITE_RO_AFTER_INIT(void); -void lkdtm_WRITE_KERN(void); -void lkdtm_EXEC_DATA(void); -void lkdtm_EXEC_STACK(void); -void lkdtm_EXEC_KMALLOC(void); -void lkdtm_EXEC_VMALLOC(void); -void lkdtm_EXEC_RODATA(void); -void lkdtm_EXEC_USERSPACE(void); -void lkdtm_EXEC_NULL(void); -void lkdtm_ACCESS_USERSPACE(void); -void lkdtm_ACCESS_NULL(void); +#define CRASHTYPE(_name) \ + { \ + .name = __stringify(_name), \ + .func = lkdtm_ ## _name, \ + } -/* refcount.c */ -void lkdtm_REFCOUNT_INC_OVERFLOW(void); -void lkdtm_REFCOUNT_ADD_OVERFLOW(void); -void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void); -void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void); -void lkdtm_REFCOUNT_DEC_ZERO(void); -void lkdtm_REFCOUNT_DEC_NEGATIVE(void); -void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void); -void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void); -void lkdtm_REFCOUNT_INC_ZERO(void); -void lkdtm_REFCOUNT_ADD_ZERO(void); -void lkdtm_REFCOUNT_INC_SATURATED(void); -void lkdtm_REFCOUNT_DEC_SATURATED(void); -void lkdtm_REFCOUNT_ADD_SATURATED(void); -void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void); -void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void); -void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void); -void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void); -void lkdtm_REFCOUNT_TIMING(void); -void lkdtm_ATOMIC_TIMING(void); +/* Category's collection of crashtypes. */ +struct crashtype_category { + struct crashtype *crashtypes; + size_t len; +}; -/* rodata.c */ -void lkdtm_rodata_do_nothing(void); +/* Each category's crashtypes list. */ +extern struct crashtype_category bugs_crashtypes; +extern struct crashtype_category heap_crashtypes; +extern struct crashtype_category perms_crashtypes; +extern struct crashtype_category refcount_crashtypes; +extern struct crashtype_category usercopy_crashtypes; +extern struct crashtype_category stackleak_crashtypes; +extern struct crashtype_category cfi_crashtypes; +extern struct crashtype_category fortify_crashtypes; +extern struct crashtype_category powerpc_crashtypes; -/* usercopy.c */ +/* Each category's init/exit routines. */ +void __init lkdtm_bugs_init(int *recur_param); +void __init lkdtm_heap_init(void); +void __exit lkdtm_heap_exit(void); +void __init lkdtm_perms_init(void); void __init lkdtm_usercopy_init(void); void __exit lkdtm_usercopy_exit(void); -void lkdtm_USERCOPY_HEAP_SIZE_TO(void); -void lkdtm_USERCOPY_HEAP_SIZE_FROM(void); -void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void); -void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void); -void lkdtm_USERCOPY_STACK_FRAME_TO(void); -void lkdtm_USERCOPY_STACK_FRAME_FROM(void); -void lkdtm_USERCOPY_STACK_BEYOND(void); -void lkdtm_USERCOPY_KERNEL(void); - -/* stackleak.c */ -void lkdtm_STACKLEAK_ERASING(void); -/* cfi.c */ -void lkdtm_CFI_FORWARD_PROTO(void); - -/* fortify.c */ -void lkdtm_FORTIFIED_STRSCPY(void); - -/* powerpc.c */ -void lkdtm_PPC_SLB_MULTIHIT(void); +/* Special declaration for function-in-rodata. */ +void lkdtm_rodata_do_nothing(void); #endif diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c index 2dede2ef658f..e1f5e9abb301 100644 --- a/drivers/misc/lkdtm/perms.c +++ b/drivers/misc/lkdtm/perms.c @@ -9,7 +9,9 @@ #include <linux/vmalloc.h> #include <linux/mman.h> #include <linux/uaccess.h> +#include <linux/objtool.h> #include <asm/cacheflush.h> +#include <asm/sections.h> /* Whether or not to fill the target memory area with do_nothing(). 
*/ #define CODE_WRITE true @@ -21,65 +23,98 @@ /* This is non-const, so it will end up in the .data section. */ static u8 data_area[EXEC_SIZE]; -/* This is cost, so it will end up in the .rodata section. */ +/* This is const, so it will end up in the .rodata section. */ static const unsigned long rodata = 0xAA55AA55; /* This is marked __ro_after_init, so it should ultimately be .rodata. */ static unsigned long ro_after_init __ro_after_init = 0x55AA5500; /* + * This is a pointer to do_nothing() which is initialized at runtime rather + * than build time to avoid objtool IBT validation warnings caused by an + * inlined unrolled memcpy() in execute_location(). + */ +static void __ro_after_init *do_nothing_ptr; + +/* * This just returns to the caller. It is designed to be copied into * non-executable memory regions. */ -static void do_nothing(void) +static noinline void do_nothing(void) { return; } /* Must immediately follow do_nothing for size calculations to work out. */ -static void do_overwritten(void) +static noinline void do_overwritten(void) { pr_info("do_overwritten wasn't overwritten!\n"); return; } -static noinline void execute_location(void *dst, bool write) +static noinline void do_almost_nothing(void) +{ + pr_info("do_nothing was hijacked!\n"); +} + +static void *setup_function_descriptor(func_desc_t *fdesc, void *dst) { - void (*func)(void) = dst; + if (!have_function_descriptors()) + return dst; - pr_info("attempting ok execution at %px\n", do_nothing); + memcpy(fdesc, do_nothing, sizeof(*fdesc)); + fdesc->addr = (unsigned long)dst; + barrier(); + + return fdesc; +} + +static noinline __nocfi void execute_location(void *dst, bool write) +{ + void (*func)(void); + func_desc_t fdesc; + + pr_info("attempting ok execution at %px\n", do_nothing_ptr); do_nothing(); if (write == CODE_WRITE) { - memcpy(dst, do_nothing, EXEC_SIZE); + memcpy(dst, do_nothing_ptr, EXEC_SIZE); flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE); } - pr_info("attempting bad execution at %px\n", func); + pr_info("attempting bad execution at %px\n", dst); + func = setup_function_descriptor(&fdesc, dst); func(); pr_err("FAIL: func returned\n"); } +/* + * Explicitly doing the wrong thing for testing. + */ +ANNOTATE_NOCFI_SYM(execute_location); static void execute_user_location(void *dst) { int copied; /* Intentionally crossing kernel/user memory boundary. */ - void (*func)(void) = dst; + void (*func)(void); + func_desc_t fdesc; + void *do_nothing_text = dereference_function_descriptor(do_nothing); - pr_info("attempting ok execution at %px\n", do_nothing); + pr_info("attempting ok execution at %px\n", do_nothing_text); do_nothing(); - copied = access_process_vm(current, (unsigned long)dst, do_nothing, + copied = access_process_vm(current, (unsigned long)dst, do_nothing_text, EXEC_SIZE, FOLL_WRITE); if (copied < EXEC_SIZE) return; - pr_info("attempting bad execution at %px\n", func); + pr_info("attempting bad execution at %px\n", dst); + func = setup_function_descriptor(&fdesc, dst); func(); pr_err("FAIL: func returned\n"); } -void lkdtm_WRITE_RO(void) +static void lkdtm_WRITE_RO(void) { /* Explicitly cast away "const" for the test and make volatile.
@@ -89,7 +124,7 @@ void lkdtm_WRITE_RO(void)
 	pr_err("FAIL: survived bad write\n");
 }
 
-void lkdtm_WRITE_RO_AFTER_INIT(void)
+static void lkdtm_WRITE_RO_AFTER_INIT(void)
 {
 	volatile unsigned long *ptr = &ro_after_init;
 
@@ -108,13 +143,14 @@ void lkdtm_WRITE_RO_AFTER_INIT(void)
 	pr_err("FAIL: survived bad write\n");
 }
 
-void lkdtm_WRITE_KERN(void)
+static void lkdtm_WRITE_KERN(void)
 {
 	size_t size;
 	volatile unsigned char *ptr;
 
-	size = (unsigned long)do_overwritten - (unsigned long)do_nothing;
-	ptr = (unsigned char *)do_overwritten;
+	size = (unsigned long)dereference_function_descriptor(do_overwritten) -
+	       (unsigned long)dereference_function_descriptor(do_nothing);
+	ptr = dereference_function_descriptor(do_overwritten);
 
 	pr_info("attempting bad %zu byte write at %px\n", size, ptr);
 	memcpy((void *)ptr, (unsigned char *)do_nothing, size);
@@ -124,37 +160,55 @@ void lkdtm_WRITE_KERN(void)
 	do_overwritten();
 }
 
-void lkdtm_EXEC_DATA(void)
+static void lkdtm_WRITE_OPD(void)
+{
+	size_t size = sizeof(func_desc_t);
+	void (*func)(void) = do_nothing;
+
+	if (!have_function_descriptors()) {
+		pr_info("XFAIL: Platform doesn't use function descriptors.\n");
+		return;
+	}
+	pr_info("attempting bad %zu bytes write at %px\n", size, do_nothing);
+	memcpy(do_nothing, do_almost_nothing, size);
+	pr_err("FAIL: survived bad write\n");
+
+	asm("" : "=m"(func));
+	func();
+}
+
+static void lkdtm_EXEC_DATA(void)
 {
 	execute_location(data_area, CODE_WRITE);
 }
 
-void lkdtm_EXEC_STACK(void)
+static void lkdtm_EXEC_STACK(void)
 {
 	u8 stack_area[EXEC_SIZE];
 
 	execute_location(stack_area, CODE_WRITE);
 }
 
-void lkdtm_EXEC_KMALLOC(void)
+static void lkdtm_EXEC_KMALLOC(void)
 {
 	u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
 
 	execute_location(kmalloc_area, CODE_WRITE);
 	kfree(kmalloc_area);
 }
 
-void lkdtm_EXEC_VMALLOC(void)
+static void lkdtm_EXEC_VMALLOC(void)
 {
 	u32 *vmalloc_area = vmalloc(EXEC_SIZE);
 
 	execute_location(vmalloc_area, CODE_WRITE);
 	vfree(vmalloc_area);
 }
 
-void lkdtm_EXEC_RODATA(void)
+static void lkdtm_EXEC_RODATA(void)
 {
-	execute_location(lkdtm_rodata_do_nothing, CODE_AS_IS);
+	execute_location(dereference_function_descriptor(lkdtm_rodata_do_nothing),
+			 CODE_AS_IS);
 }
 
-void lkdtm_EXEC_USERSPACE(void)
+static void lkdtm_EXEC_USERSPACE(void)
 {
 	unsigned long user_addr;
 
@@ -169,12 +223,12 @@ void lkdtm_EXEC_USERSPACE(void)
 	vm_munmap(user_addr, PAGE_SIZE);
 }
 
-void lkdtm_EXEC_NULL(void)
+static void lkdtm_EXEC_NULL(void)
 {
 	execute_location(NULL, CODE_AS_IS);
 }
 
-void lkdtm_ACCESS_USERSPACE(void)
+static void lkdtm_ACCESS_USERSPACE(void)
 {
 	unsigned long user_addr, tmp = 0;
 	unsigned long *ptr;
@@ -207,7 +261,7 @@ void lkdtm_ACCESS_USERSPACE(void)
 	vm_munmap(user_addr, PAGE_SIZE);
 }
 
-void lkdtm_ACCESS_NULL(void)
+static void lkdtm_ACCESS_NULL(void)
 {
 	unsigned long tmp;
 	volatile unsigned long *ptr = (unsigned long *)NULL;
@@ -224,6 +278,29 @@ void lkdtm_ACCESS_NULL(void)
 
 void __init lkdtm_perms_init(void)
 {
+	do_nothing_ptr = dereference_function_descriptor(do_nothing);
+
 	/* Make sure we can write to __ro_after_init values during __init */
 	ro_after_init |= 0xAA;
 }
+
+static struct crashtype crashtypes[] = {
+	CRASHTYPE(WRITE_RO),
+	CRASHTYPE(WRITE_RO_AFTER_INIT),
+	CRASHTYPE(WRITE_KERN),
+	CRASHTYPE(WRITE_OPD),
+	CRASHTYPE(EXEC_DATA),
+	CRASHTYPE(EXEC_STACK),
+	CRASHTYPE(EXEC_KMALLOC),
+	CRASHTYPE(EXEC_VMALLOC),
+	CRASHTYPE(EXEC_RODATA),
+	CRASHTYPE(EXEC_USERSPACE),
+	CRASHTYPE(EXEC_NULL),
+	CRASHTYPE(ACCESS_USERSPACE),
+	CRASHTYPE(ACCESS_NULL),
+};
+
+struct crashtype_category perms_crashtypes = {
+	.crashtypes = crashtypes,
+	.len = ARRAY_SIZE(crashtypes),
+};
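All the EXEC_* tests funnel into execute_location(): copy do_nothing() into the target region, jump there, and expect the kernel's W^X enforcement to fault. The same copy-and-jump sequence can be tried from user space, where mapping the destination PROT_EXEC lets it succeed. This sketch assumes position-independent code on a platform without strict IBT/PAC enforcement (e.g. plain x86-64) and may not run everywhere:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define EXEC_SIZE 64

__attribute__((noinline)) static void do_nothing(void) { }

int main(void)
{
	/* Executable on purpose; the kernel tests target non-exec memory. */
	void *dst = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (dst == MAP_FAILED)
		return 1;

	memcpy(dst, (const void *)do_nothing, EXEC_SIZE);
	/* Keep the I-cache coherent, as flush_icache_range() does in-kernel. */
	__builtin___clear_cache((char *)dst, (char *)dst + EXEC_SIZE);

	((void (*)(void))dst)();
	puts("copied code returned");
	return 0;
}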
diff --git a/drivers/misc/lkdtm/powerpc.c b/drivers/misc/lkdtm/powerpc.c
index 077c9f9ed8d0..be385449911a 100644
--- a/drivers/misc/lkdtm/powerpc.c
+++ b/drivers/misc/lkdtm/powerpc.c
@@ -100,7 +100,7 @@ static void insert_dup_slb_entry_0(void)
 	preempt_enable();
 }
 
-void lkdtm_PPC_SLB_MULTIHIT(void)
+static void lkdtm_PPC_SLB_MULTIHIT(void)
 {
 	if (!radix_enabled()) {
 		pr_info("Injecting SLB multihit errors\n");
@@ -118,3 +118,12 @@ void lkdtm_PPC_SLB_MULTIHIT(void)
 		pr_err("XFAIL: This test is for ppc64 and with hash mode MMU only\n");
 	}
 }
+
+static struct crashtype crashtypes[] = {
+	CRASHTYPE(PPC_SLB_MULTIHIT),
+};
+
+struct crashtype_category powerpc_crashtypes = {
+	.crashtypes = crashtypes,
+	.len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/refcount.c b/drivers/misc/lkdtm/refcount.c
index de7c5ab528d9..8f744bee6fbd 100644
--- a/drivers/misc/lkdtm/refcount.c
+++ b/drivers/misc/lkdtm/refcount.c
@@ -24,7 +24,7 @@ static void overflow_check(refcount_t *ref)
  * A refcount_inc() above the maximum value of the refcount implementation,
  * should at least saturate, and at most also WARN.
  */
-void lkdtm_REFCOUNT_INC_OVERFLOW(void)
+static void lkdtm_REFCOUNT_INC_OVERFLOW(void)
 {
 	refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1);
 
@@ -40,7 +40,7 @@ void lkdtm_REFCOUNT_INC_OVERFLOW(void)
 }
 
 /* refcount_add() should behave just like refcount_inc() above. */
-void lkdtm_REFCOUNT_ADD_OVERFLOW(void)
+static void lkdtm_REFCOUNT_ADD_OVERFLOW(void)
 {
 	refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1);
 
@@ -58,7 +58,7 @@ void lkdtm_REFCOUNT_ADD_OVERFLOW(void)
 }
 
 /* refcount_inc_not_zero() should behave just like refcount_inc() above. */
-void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void)
+static void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void)
 {
 	refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX);
 
@@ -70,7 +70,7 @@ void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void)
 }
 
 /* refcount_add_not_zero() should behave just like refcount_inc() above. */
-void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void)
+static void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void)
 {
 	refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX);
 
@@ -103,7 +103,7 @@ static void check_zero(refcount_t *ref)
  * zero it should either saturate (when inc-from-zero isn't protected)
 * or stay at zero (when inc-from-zero is protected) and should WARN for both.
 */
-void lkdtm_REFCOUNT_DEC_ZERO(void)
+static void lkdtm_REFCOUNT_DEC_ZERO(void)
 {
 	refcount_t zero = REFCOUNT_INIT(2);
 
@@ -142,7 +142,7 @@ static void check_negative(refcount_t *ref, int start)
 }
 
 /* A refcount_dec() going negative should saturate and may WARN. */
-void lkdtm_REFCOUNT_DEC_NEGATIVE(void)
+static void lkdtm_REFCOUNT_DEC_NEGATIVE(void)
 {
 	refcount_t neg = REFCOUNT_INIT(0);
 
@@ -156,7 +156,7 @@ void lkdtm_REFCOUNT_DEC_NEGATIVE(void)
  * A refcount_dec_and_test() should act like refcount_dec() above when
  * going negative.
  */
-void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void)
+static void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void)
 {
 	refcount_t neg = REFCOUNT_INIT(0);
 
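The overflow tests above all probe the same property: once a refcount_t crosses REFCOUNT_MAX it must stick at the saturation value instead of wrapping back to a small, exploitable count. A toy user-space model of that saturation arithmetic, using the INT_MAX and INT_MIN/2 constants the kernel implementation documents (the helper name is invented):

#include <stdio.h>
#include <limits.h>

#define REFCOUNT_MAX		INT_MAX
#define REFCOUNT_SATURATED	(INT_MIN / 2)

/* Saturating add: once poisoned, the counter never moves again. */
static int refcount_add_model(int val, int old)
{
	long sum = (long)old + val;

	if (old == REFCOUNT_SATURATED || sum > REFCOUNT_MAX)
		return REFCOUNT_SATURATED;
	return (int)sum;
}

int main(void)
{
	int r = REFCOUNT_MAX - 1;

	r = refcount_add_model(2, r);	/* crosses the max: saturates */
	printf("after overflow: saturated=%d\n", r == REFCOUNT_SATURATED);
	r = refcount_add_model(1, r);	/* sticky: stays saturated */
	printf("after another add: saturated=%d\n", r == REFCOUNT_SATURATED);
	return 0;
}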
@@ -171,7 +171,7 @@ void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void)
  * A refcount_sub_and_test() should act like refcount_dec_and_test()
  * above when going negative.
  */
-void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void)
+static void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void)
 {
 	refcount_t neg = REFCOUNT_INIT(3);
 
@@ -182,6 +182,21 @@ void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void)
 	check_negative(&neg, 3);
 }
 
+/*
+ * A refcount_sub_and_test() by zero when the counter is at zero should act like
+ * refcount_sub_and_test() above when going negative.
+ */
+static void lkdtm_REFCOUNT_SUB_AND_TEST_ZERO(void)
+{
+	refcount_t neg = REFCOUNT_INIT(0);
+
+	pr_info("attempting bad refcount_sub_and_test() at zero\n");
+	if (refcount_sub_and_test(0, &neg))
+		pr_warn("Weird: refcount_sub_and_test() reported zero\n");
+
+	check_negative(&neg, 0);
+}
+
 static void check_from_zero(refcount_t *ref)
 {
 	switch (refcount_read(ref)) {
@@ -203,7 +218,7 @@ static void check_from_zero(refcount_t *ref)
 /*
  * A refcount_inc() from zero should pin to zero or saturate and may WARN.
  */
-void lkdtm_REFCOUNT_INC_ZERO(void)
+static void lkdtm_REFCOUNT_INC_ZERO(void)
 {
 	refcount_t zero = REFCOUNT_INIT(0);
 
@@ -228,7 +243,7 @@ void lkdtm_REFCOUNT_INC_ZERO(void)
  * A refcount_add() should act like refcount_inc() above when starting
  * at zero.
  */
-void lkdtm_REFCOUNT_ADD_ZERO(void)
+static void lkdtm_REFCOUNT_ADD_ZERO(void)
 {
 	refcount_t zero = REFCOUNT_INIT(0);
 
@@ -267,7 +282,7 @@ static void check_saturated(refcount_t *ref)
  * A refcount_inc() from a saturated value should at most warn about
  * being saturated already.
 */
-void lkdtm_REFCOUNT_INC_SATURATED(void)
+static void lkdtm_REFCOUNT_INC_SATURATED(void)
 {
 	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
 
@@ -278,7 +293,7 @@ void lkdtm_REFCOUNT_INC_SATURATED(void)
 }
 
 /* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_DEC_SATURATED(void)
+static void lkdtm_REFCOUNT_DEC_SATURATED(void)
 {
 	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
 
@@ -289,7 +304,7 @@ void lkdtm_REFCOUNT_DEC_SATURATED(void)
 }
 
 /* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_ADD_SATURATED(void)
+static void lkdtm_REFCOUNT_ADD_SATURATED(void)
 {
 	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
 
@@ -300,7 +315,7 @@ void lkdtm_REFCOUNT_ADD_SATURATED(void)
 }
 
 /* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void)
+static void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void)
 {
 	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
 
@@ -312,7 +327,7 @@ void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void)
 }
 
 /* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void)
+static void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void)
 {
 	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
 
@@ -324,7 +339,7 @@ void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void)
 }
 
 /* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void)
+static void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void)
 {
 	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
 
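For context, the pattern every dec_and_test variant protects is release-on-last-reference; the tests above poke at it from bad starting values. A minimal sketch of the correct idiom, with a plain counter standing in for refcount_t:

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refs;		/* stand-in for refcount_t */
	char payload[16];
};

static void obj_put(struct obj *o)
{
	/* refcount_dec_and_test() returns true only on the 1 -> 0 drop. */
	if (--o->refs == 0) {
		puts("last reference dropped, freeing");
		free(o);
	}
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;
	o->refs = 2;	/* two owners */
	obj_put(o);	/* 2 -> 1: no free */
	obj_put(o);	/* 1 -> 0: frees exactly once */
	return 0;
}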
@@ -336,7 +351,7 @@ void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void)
 }
 
 /* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void)
+static void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void)
 {
 	refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
 
@@ -348,7 +363,7 @@ void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void)
 }
 
 /* Used to time the existing atomic_t when used for reference counting */
-void lkdtm_ATOMIC_TIMING(void)
+static void lkdtm_ATOMIC_TIMING(void)
 {
 	unsigned int i;
 	atomic_t count = ATOMIC_INIT(1);
@@ -373,7 +388,7 @@ void lkdtm_ATOMIC_TIMING(void)
  *	cd /sys/kernel/debug/provoke-crash
  *	perf stat -B -- cat <(echo REFCOUNT_TIMING) > DIRECT
 */
-void lkdtm_REFCOUNT_TIMING(void)
+static void lkdtm_REFCOUNT_TIMING(void)
 {
 	unsigned int i;
 	refcount_t count = REFCOUNT_INIT(1);
@@ -390,3 +405,31 @@ void lkdtm_REFCOUNT_TIMING(void)
 	else
 		pr_info("refcount timing: done\n");
 }
+
+static struct crashtype crashtypes[] = {
+	CRASHTYPE(REFCOUNT_INC_OVERFLOW),
+	CRASHTYPE(REFCOUNT_ADD_OVERFLOW),
+	CRASHTYPE(REFCOUNT_INC_NOT_ZERO_OVERFLOW),
+	CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_OVERFLOW),
+	CRASHTYPE(REFCOUNT_DEC_ZERO),
+	CRASHTYPE(REFCOUNT_DEC_NEGATIVE),
+	CRASHTYPE(REFCOUNT_DEC_AND_TEST_NEGATIVE),
+	CRASHTYPE(REFCOUNT_SUB_AND_TEST_NEGATIVE),
+	CRASHTYPE(REFCOUNT_SUB_AND_TEST_ZERO),
+	CRASHTYPE(REFCOUNT_INC_ZERO),
+	CRASHTYPE(REFCOUNT_ADD_ZERO),
+	CRASHTYPE(REFCOUNT_INC_SATURATED),
+	CRASHTYPE(REFCOUNT_DEC_SATURATED),
+	CRASHTYPE(REFCOUNT_ADD_SATURATED),
+	CRASHTYPE(REFCOUNT_INC_NOT_ZERO_SATURATED),
+	CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_SATURATED),
+	CRASHTYPE(REFCOUNT_DEC_AND_TEST_SATURATED),
+	CRASHTYPE(REFCOUNT_SUB_AND_TEST_SATURATED),
+	CRASHTYPE(ATOMIC_TIMING),
+	CRASHTYPE(REFCOUNT_TIMING),
+};
+
+struct crashtype_category refcount_crashtypes = {
+	.crashtypes = crashtypes,
+	.len = ARRAY_SIZE(crashtypes),
+};
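The two *_TIMING entries exist so the cost of refcount_t's checks can be compared against bare atomic_t under perf, as the comment above describes. Roughly the same measurement loop in user space, with an arbitrary iteration count, might look like:

#include <stdio.h>
#include <stdatomic.h>

int main(void)
{
	atomic_int count = 1;

	/* Hammer an inc/dec pair; attribute the cost with perf stat. */
	for (unsigned int i = 0; i < (1U << 26); i++) {
		atomic_fetch_add_explicit(&count, 1, memory_order_relaxed);
		atomic_fetch_sub_explicit(&count, 1, memory_order_relaxed);
	}
	printf("final count: %d\n", atomic_load(&count));
	return 0;
}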
diff --git a/drivers/misc/lkdtm/stackleak.c b/drivers/misc/lkdtm/stackleak.c
deleted file mode 100644
index 00db21ff115e..000000000000
--- a/drivers/misc/lkdtm/stackleak.c
+++ /dev/null
@@ -1,82 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This code tests that the current task stack is properly erased (filled
- * with STACKLEAK_POISON).
- *
- * Authors:
- *   Alexander Popov <alex.popov@linux.com>
- *   Tycho Andersen <tycho@tycho.ws>
- */
-
-#include "lkdtm.h"
-#include <linux/stackleak.h>
-
-void lkdtm_STACKLEAK_ERASING(void)
-{
-	unsigned long *sp, left, found, i;
-	const unsigned long check_depth =
-			STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long);
-	bool test_failed = false;
-
-	/*
-	 * For the details about the alignment of the poison values, see
-	 * the comment in stackleak_track_stack().
-	 */
-	sp = PTR_ALIGN(&i, sizeof(unsigned long));
-
-	left = ((unsigned long)sp & (THREAD_SIZE - 1)) / sizeof(unsigned long);
-	sp--;
-
-	/*
-	 * One 'long int' at the bottom of the thread stack is reserved
-	 * and not poisoned.
-	 */
-	if (left > 1) {
-		left--;
-	} else {
-		pr_err("FAIL: not enough stack space for the test\n");
-		test_failed = true;
-		goto end;
-	}
-
-	pr_info("checking unused part of the thread stack (%lu bytes)...\n",
-		left * sizeof(unsigned long));
-
-	/*
-	 * Search for 'check_depth' poison values in a row (just like
-	 * stackleak_erase() does).
-	 */
-	for (i = 0, found = 0; i < left && found <= check_depth; i++) {
-		if (*(sp - i) == STACKLEAK_POISON)
-			found++;
-		else
-			found = 0;
-	}
-
-	if (found <= check_depth) {
-		pr_err("FAIL: the erased part is not found (checked %lu bytes)\n",
-		       i * sizeof(unsigned long));
-		test_failed = true;
-		goto end;
-	}
-
-	pr_info("the erased part begins after %lu not poisoned bytes\n",
-		(i - found) * sizeof(unsigned long));
-
-	/* The rest of thread stack should be erased */
-	for (; i < left; i++) {
-		if (*(sp - i) != STACKLEAK_POISON) {
-			pr_err("FAIL: bad value number %lu in the erased part: 0x%lx\n",
-			       i, *(sp - i));
-			test_failed = true;
-		}
-	}
-
-end:
-	if (test_failed) {
-		pr_err("FAIL: the thread stack is NOT properly erased!\n");
-		pr_expected_config(CONFIG_GCC_PLUGIN_STACKLEAK);
-	} else {
-		pr_info("OK: the rest of the thread stack is properly erased\n");
-	}
-}
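The deleted test scanned from the current stack pointer toward the unused end of the thread stack for a run of consecutive poison words (at least STACKLEAK_SEARCH_DEPTH bytes), then insisted that everything past that run stayed poisoned; per the Makefile change, the file lives on as kstack_erase.c. The same two-pass scan over a synthetic buffer, with an illustrative poison value and depth:

#include <stdio.h>
#include <stdbool.h>

#define POISON		0xCCCCCCCCUL	/* illustrative, not STACKLEAK_POISON */
#define CHECK_DEPTH	4		/* poison words required in a row */

static bool erased_part_ok(const unsigned long *stack, unsigned long len)
{
	unsigned long i, found = 0;

	/* Pass 1: locate CHECK_DEPTH consecutive poison words. */
	for (i = 0; i < len && found <= CHECK_DEPTH; i++) {
		if (stack[i] == POISON)
			found++;
		else
			found = 0;
	}
	if (found <= CHECK_DEPTH)
		return false;	/* no erased region located */

	/* Pass 2: everything beyond the run must still be poison. */
	for (; i < len; i++)
		if (stack[i] != POISON)
			return false;
	return true;
}

int main(void)
{
	unsigned long stack[32];

	/* Eight "used" words, then a properly erased remainder. */
	for (unsigned long i = 0; i < 32; i++)
		stack[i] = (i < 8) ? i : POISON;
	printf("erased part %s\n", erased_part_ok(stack, 32) ? "OK" : "BAD");
	return 0;
}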
diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
index 9161ce7ed47a..67db57249a34 100644
--- a/drivers/misc/lkdtm/usercopy.c
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -5,6 +5,7 @@
 */
 #include "lkdtm.h"
 #include <linux/slab.h>
+#include <linux/highmem.h>
 #include <linux/vmalloc.h>
 #include <linux/sched/task_stack.h>
 #include <linux/mman.h>
@@ -30,12 +31,12 @@ static const unsigned char test_text[] = "This is a test.\n";
 */
 static noinline unsigned char *trick_compiler(unsigned char *stack)
 {
-	return stack + 0;
+	return stack + unconst;
 }
 
 static noinline unsigned char *do_usercopy_stack_callee(int value)
 {
-	unsigned char buf[32];
+	unsigned char buf[128];
 	int i;
 
 	/* Exercise stack to avoid everything living in registers. */
@@ -43,7 +44,12 @@ static noinline unsigned char *do_usercopy_stack_callee(int value)
 		buf[i] = value & 0xff;
 	}
 
-	return trick_compiler(buf);
+	/*
+	 * Put the target buffer in the middle of stack allocation
+	 * so that we don't step on future stack users regardless
+	 * of stack growth direction.
+	 */
+	return trick_compiler(&buf[(128/2)-32]);
 }
 
 static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
@@ -66,6 +72,12 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
 		bad_stack -= sizeof(unsigned long);
 	}
 
+#ifdef ARCH_HAS_CURRENT_STACK_POINTER
+	pr_info("stack     : %px\n", (void *)current_stack_pointer);
+#endif
+	pr_info("good_stack: %px-%px\n", good_stack, good_stack + sizeof(good_stack));
+	pr_info("bad_stack : %px-%px\n", bad_stack, bad_stack + sizeof(good_stack));
+
 	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
 			    PROT_READ | PROT_WRITE | PROT_EXEC,
 			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
@@ -119,7 +131,7 @@ free_user:
  * This checks for whole-object size validation with hardened usercopy,
  * with or without usercopy whitelisting.
 */
-static void do_usercopy_heap_size(bool to_user)
+static void do_usercopy_slab_size(bool to_user)
 {
 	unsigned long user_addr;
 	unsigned char *one, *two;
@@ -185,9 +197,9 @@ free_kernel:
 
 /*
  * This checks for the specific whitelist window within an object. If this
- * test passes, then do_usercopy_heap_size() tests will pass too.
+ * test passes, then do_usercopy_slab_size() tests will pass too.
 */
-static void do_usercopy_heap_whitelist(bool to_user)
+static void do_usercopy_slab_whitelist(bool to_user)
 {
 	unsigned long user_alloc;
 	unsigned char *buf = NULL;
@@ -261,42 +273,42 @@ free_alloc:
 }
 
 /* Callable tests. */
-void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
+static void lkdtm_USERCOPY_SLAB_SIZE_TO(void)
 {
-	do_usercopy_heap_size(true);
+	do_usercopy_slab_size(true);
 }
 
-void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
+static void lkdtm_USERCOPY_SLAB_SIZE_FROM(void)
 {
-	do_usercopy_heap_size(false);
+	do_usercopy_slab_size(false);
 }
 
-void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void)
+static void lkdtm_USERCOPY_SLAB_WHITELIST_TO(void)
 {
-	do_usercopy_heap_whitelist(true);
+	do_usercopy_slab_whitelist(true);
 }
 
-void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void)
+static void lkdtm_USERCOPY_SLAB_WHITELIST_FROM(void)
 {
-	do_usercopy_heap_whitelist(false);
+	do_usercopy_slab_whitelist(false);
 }
 
-void lkdtm_USERCOPY_STACK_FRAME_TO(void)
+static void lkdtm_USERCOPY_STACK_FRAME_TO(void)
 {
 	do_usercopy_stack(true, true);
 }
 
-void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
+static void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
 {
 	do_usercopy_stack(false, true);
 }
 
-void lkdtm_USERCOPY_STACK_BEYOND(void)
+static void lkdtm_USERCOPY_STACK_BEYOND(void)
 {
 	do_usercopy_stack(true, false);
 }
 
-void lkdtm_USERCOPY_KERNEL(void)
+static void lkdtm_USERCOPY_KERNEL(void)
 {
 	unsigned long user_addr;
 
@@ -318,7 +330,7 @@ void lkdtm_USERCOPY_KERNEL(void)
 
 	pr_info("attempting bad copy_to_user from kernel text: %px\n",
 		vm_mmap);
-	if (copy_to_user((void __user *)user_addr, function_nocfi(vm_mmap),
+	if (copy_to_user((void __user *)user_addr, vm_mmap,
 			 unconst + PAGE_SIZE)) {
 		pr_warn("copy_to_user failed, but lacked Oops\n");
 		goto free_user;
@@ -330,6 +342,86 @@ free_user:
 	vm_munmap(user_addr, PAGE_SIZE);
 }
 
+/*
+ * This expects "kaddr" to point to a PAGE_SIZE allocation, which means
+ * a more complete test that would include copy_from_user() would risk
+ * memory corruption. Just test copy_to_user() here, as that exercises
+ * almost exactly the same code paths.
+ */
+static void do_usercopy_page_span(const char *name, void *kaddr)
+{
+	unsigned long uaddr;
+
+	uaddr = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE,
+			MAP_ANONYMOUS | MAP_PRIVATE, 0);
+	if (uaddr >= TASK_SIZE) {
+		pr_warn("Failed to allocate user memory\n");
+		return;
+	}
+
+	/* Initialize contents. */
+	memset(kaddr, 0xAA, PAGE_SIZE);
+
+	/* Bump the kaddr forward to detect a page-spanning overflow. */
+	kaddr += PAGE_SIZE / 2;
+
+	pr_info("attempting good copy_to_user() from kernel %s: %px\n",
+		name, kaddr);
+	if (copy_to_user((void __user *)uaddr, kaddr,
+			 unconst + (PAGE_SIZE / 2))) {
+		pr_err("copy_to_user() failed unexpectedly?!\n");
+		goto free_user;
+	}
+
+	pr_info("attempting bad copy_to_user() from kernel %s: %px\n",
+		name, kaddr);
+	if (copy_to_user((void __user *)uaddr, kaddr, unconst + PAGE_SIZE)) {
+		pr_warn("Good, copy_to_user() failed, but lacked Oops(?!)\n");
+		goto free_user;
+	}
+
+	pr_err("FAIL: bad copy_to_user() not detected!\n");
+	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");
+
+free_user:
+	vm_munmap(uaddr, PAGE_SIZE);
+}
+
+static void lkdtm_USERCOPY_VMALLOC(void)
+{
+	void *addr;
+
+	addr = vmalloc(PAGE_SIZE);
+	if (!addr) {
+		pr_err("vmalloc() failed!?\n");
+		return;
+	}
+	do_usercopy_page_span("vmalloc", addr);
+	vfree(addr);
+}
+
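The SLAB_WHITELIST tests above rely on hardened usercopy rejecting any copy that leaves the window a cache declared via kmem_cache_create_usercopy(). A toy version of that window check, with invented offsets; the real kernel logic also validates page spans and whole-object bounds:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct cache {
	size_t useroffset;	/* start of whitelisted window */
	size_t usersize;	/* length of whitelisted window */
};

/* A copy of [offset, offset+len) is allowed only inside the window. */
static bool usercopy_allowed(const struct cache *c, size_t offset, size_t len)
{
	return offset >= c->useroffset &&
	       len <= c->usersize &&
	       offset - c->useroffset <= c->usersize - len;
}

int main(void)
{
	struct cache c = { .useroffset = 16, .usersize = 32 };

	printf("inside window:  %d\n", usercopy_allowed(&c, 16, 32));	/* 1 */
	printf("spills past it: %d\n", usercopy_allowed(&c, 40, 32));	/* 0 */
	return 0;
}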
+static void lkdtm_USERCOPY_FOLIO(void)
+{
+	struct folio *folio;
+	void *addr;
+
+	/*
+	 * FIXME: Folio checking currently misses 0-order allocations, so
+	 * allocate and bump forward to the last page.
+	 */
+	folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 1);
+	if (!folio) {
+		pr_err("folio_alloc() failed!?\n");
+		return;
+	}
+	addr = folio_address(folio);
+	if (addr)
+		do_usercopy_page_span("folio", addr + PAGE_SIZE);
+	else
+		pr_err("folio_address() failed?!\n");
+	folio_put(folio);
+}
+
 void __init lkdtm_usercopy_init(void)
 {
 	/* Prepare cache that lacks SLAB_USERCOPY flag. */
@@ -345,3 +437,21 @@ void __exit lkdtm_usercopy_exit(void)
 {
 	kmem_cache_destroy(whitelist_cache);
 }
+
+static struct crashtype crashtypes[] = {
+	CRASHTYPE(USERCOPY_SLAB_SIZE_TO),
+	CRASHTYPE(USERCOPY_SLAB_SIZE_FROM),
+	CRASHTYPE(USERCOPY_SLAB_WHITELIST_TO),
+	CRASHTYPE(USERCOPY_SLAB_WHITELIST_FROM),
+	CRASHTYPE(USERCOPY_STACK_FRAME_TO),
+	CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
+	CRASHTYPE(USERCOPY_STACK_BEYOND),
+	CRASHTYPE(USERCOPY_VMALLOC),
+	CRASHTYPE(USERCOPY_FOLIO),
+	CRASHTYPE(USERCOPY_KERNEL),
+};
+
+struct crashtype_category usercopy_crashtypes = {
+	.crashtypes = crashtypes,
+	.len = ARRAY_SIZE(crashtypes),
+};
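All of these crashtypes are reached the same way at runtime: write the test name to the DIRECT file under debugfs, as the refcount timing comment earlier shows. A minimal C trigger, assuming CONFIG_LKDTM is enabled and debugfs is mounted; expect the machine to Oops whenever a probed defense fails:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/provoke-crash/DIRECT";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	/* Any name from a crashtypes[] table works here. */
	fprintf(f, "USERCOPY_VMALLOC\n");
	return fclose(f) ? 1 : 0;
}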
