Diffstat (limited to 'drivers/misc/lkdtm/bugs.c')
 drivers/misc/lkdtm/bugs.c | 351
 1 file changed, 262 insertions(+), 89 deletions(-)
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 88c218a9f8b3..376047beea3d 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -6,12 +6,14 @@
  * test source files.
  */
 #include "lkdtm.h"
+#include <linux/cpu.h>
 #include <linux/list.h>
 #include <linux/sched.h>
 #include <linux/sched/signal.h>
 #include <linux/sched/task_stack.h>
-#include <linux/uaccess.h>
 #include <linux/slab.h>
+#include <linux/stop_machine.h>
+#include <linux/uaccess.h>
 
 #if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
 #include <asm/desc.h>
@@ -29,7 +31,7 @@ struct lkdtm_list {
 #if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
 #define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
 #else
-#define REC_STACK_SIZE (THREAD_SIZE / 8)
+#define REC_STACK_SIZE (THREAD_SIZE / 8UL)
 #endif
 #define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
 
@@ -41,20 +43,22 @@ static DEFINE_SPINLOCK(lock_me_up);
  * Make sure compiler does not optimize this function or stack frame away:
  * - function marked noinline
  * - stack variables are marked volatile
- * - stack variables are written (memset()) and read (pr_info())
- * - function has external effects (pr_info())
- * */
+ * - stack variables are written (memset()) and read (buf[..] passed as arg)
+ * - function may have external effects (memzero_explicit())
+ * - no tail recursion possible
+ */
 static int noinline recursive_loop(int remaining)
 {
        volatile char buf[REC_STACK_SIZE];
+       volatile int ret;
 
        memset((void *)buf, remaining & 0xFF, sizeof(buf));
-       pr_info("loop %d/%d ...\n", (int)buf[remaining % sizeof(buf)],
-               recur_count);
        if (!remaining)
-               return 0;
+               ret = 0;
        else
-               return recursive_loop(remaining - 1);
+               ret = recursive_loop((int)buf[remaining % sizeof(buf)] - 1);
+       memzero_explicit((void *)buf, sizeof(buf));
+       return ret;
 }
 
 /* If the depth is negative, use the default, otherwise keep parameter. */
@@ -66,40 +70,65 @@ void __init lkdtm_bugs_init(int *recur_param)
                recur_count = *recur_param;
 }
 
-void lkdtm_PANIC(void)
+static void lkdtm_PANIC(void)
 {
        panic("dumptest");
 }
 
-void lkdtm_BUG(void)
+static int panic_stop_irqoff_fn(void *arg)
+{
+       atomic_t *v = arg;
+
+       /*
+        * As stop_machine() disables interrupts, all CPUs within this function
+        * have interrupts disabled and cannot take a regular IPI.
+        *
+        * The last CPU which enters here will trigger a panic, and as all CPUs
+        * cannot take a regular IPI, we'll only be able to stop secondaries if
+        * smp_send_stop() or crash_smp_send_stop() uses an NMI.
+        */
+       if (atomic_inc_return(v) == num_online_cpus())
+               panic("panic stop irqoff test");
+
+       for (;;)
+               cpu_relax();
+}
+
+static void lkdtm_PANIC_STOP_IRQOFF(void)
+{
+       atomic_t v = ATOMIC_INIT(0);
+
+       stop_machine(panic_stop_irqoff_fn, &v, cpu_online_mask);
+}
+
+static void lkdtm_BUG(void)
 {
        BUG();
 }
 
 static int warn_counter;
 
-void lkdtm_WARNING(void)
+static void lkdtm_WARNING(void)
 {
        WARN_ON(++warn_counter);
 }
 
-void lkdtm_WARNING_MESSAGE(void)
+static void lkdtm_WARNING_MESSAGE(void)
 {
        WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
 }
 
-void lkdtm_EXCEPTION(void)
+static void lkdtm_EXCEPTION(void)
 {
        *((volatile int *) 0) = 0;
 }
 
-void lkdtm_LOOP(void)
+static void lkdtm_LOOP(void)
 {
        for (;;)
                ;
 }
 
-void lkdtm_EXHAUST_STACK(void)
+static void lkdtm_EXHAUST_STACK(void)
 {
        pr_info("Calling function with %lu frame size to depth %d ...\n",
                REC_STACK_SIZE, recur_count);
@@ -113,7 +142,7 @@ static noinline void __lkdtm_CORRUPT_STACK(void *stack)
 }
 
 /* This should trip the stack canary, not corrupt the return address. */
-noinline void lkdtm_CORRUPT_STACK(void)
+static noinline void lkdtm_CORRUPT_STACK(void)
 {
        /* Use default char array length that triggers stack protection. */
        char data[8] __aligned(sizeof(void *));
@@ -123,7 +152,7 @@ noinline void lkdtm_CORRUPT_STACK(void)
 }
 
 /* Same as above but will only get a canary with -fstack-protector-strong */
-noinline void lkdtm_CORRUPT_STACK_STRONG(void)
+static noinline void lkdtm_CORRUPT_STACK_STRONG(void)
 {
        union {
                unsigned short shorts[4];
@@ -137,7 +166,7 @@ noinline void lkdtm_CORRUPT_STACK_STRONG(void)
 static pid_t stack_pid;
 static unsigned long stack_addr;
 
-void lkdtm_REPORT_STACK(void)
+static void lkdtm_REPORT_STACK(void)
 {
        volatile uintptr_t magic;
        pid_t pid = task_pid_nr(current);
@@ -151,7 +180,84 @@ void lkdtm_REPORT_STACK(void)
        pr_info("Stack offset: %d\n", (int)(stack_addr - (uintptr_t)&magic));
 }
 
-void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
+static pid_t stack_canary_pid;
+static unsigned long stack_canary;
+static unsigned long stack_canary_offset;
+
+static noinline void __lkdtm_REPORT_STACK_CANARY(void *stack)
+{
+       int i = 0;
+       pid_t pid = task_pid_nr(current);
+       unsigned long *canary = (unsigned long *)stack;
+       unsigned long current_offset = 0, init_offset = 0;
+
+       /* Do our best to find the canary in a 16 word window ... */
+       for (i = 1; i < 16; i++) {
+               canary = (unsigned long *)stack + i;
+#ifdef CONFIG_STACKPROTECTOR
+               if (*canary == current->stack_canary)
+                       current_offset = i;
+               if (*canary == init_task.stack_canary)
+                       init_offset = i;
+#endif
+       }
+
+       if (current_offset == 0) {
+               /*
+                * If the canary doesn't match what's in the task_struct,
+                * we're either using a global canary or the stack frame
+                * layout changed.
+                */
+               if (init_offset != 0) {
+                       pr_err("FAIL: global stack canary found at offset %ld (canary for pid %d matches init_task's)!\n",
+                              init_offset, pid);
+               } else {
+                       pr_warn("FAIL: did not correctly locate stack canary :(\n");
+                       pr_expected_config(CONFIG_STACKPROTECTOR);
+               }
+
+               return;
+       } else if (init_offset != 0) {
+               pr_warn("WARNING: found both current and init_task canaries nearby?!\n");
+       }
+
+       canary = (unsigned long *)stack + current_offset;
+       if (stack_canary_pid == 0) {
+               stack_canary = *canary;
+               stack_canary_pid = pid;
+               stack_canary_offset = current_offset;
+               pr_info("Recorded stack canary for pid %d at offset %ld\n",
+                       stack_canary_pid, stack_canary_offset);
+       } else if (pid == stack_canary_pid) {
+               pr_warn("ERROR: saw pid %d again -- please use a new pid\n", pid);
+       } else {
+               if (current_offset != stack_canary_offset) {
+                       pr_warn("ERROR: canary offset changed from %ld to %ld!?\n",
+                               stack_canary_offset, current_offset);
+                       return;
+               }
+
+               if (*canary == stack_canary) {
+                       pr_warn("FAIL: canary identical for pid %d and pid %d at offset %ld!\n",
+                               stack_canary_pid, pid, current_offset);
+               } else {
+                       pr_info("ok: stack canaries differ between pid %d and pid %d at offset %ld.\n",
+                               stack_canary_pid, pid, current_offset);
+                       /* Reset the test. */
+                       stack_canary_pid = 0;
+               }
+       }
+}
+
+static void lkdtm_REPORT_STACK_CANARY(void)
+{
+       /* Use default char array length that triggers stack protection. */
+       char data[8] __aligned(sizeof(void *)) = { };
+
+       __lkdtm_REPORT_STACK_CANARY((void *)&data);
+}
+
+static void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
 {
        static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
        u32 *p;
@@ -166,21 +272,50 @@ void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
        pr_err("XFAIL: arch has CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS\n");
 }
 
-void lkdtm_SOFTLOCKUP(void)
+static void lkdtm_SOFTLOCKUP(void)
 {
        preempt_disable();
        for (;;)
                cpu_relax();
 }
 
-void lkdtm_HARDLOCKUP(void)
+static void lkdtm_HARDLOCKUP(void)
 {
        local_irq_disable();
        for (;;)
                cpu_relax();
 }
 
-void lkdtm_SPINLOCKUP(void)
+static void __lkdtm_SMP_CALL_LOCKUP(void *unused)
+{
+       for (;;)
+               cpu_relax();
+}
+
+static void lkdtm_SMP_CALL_LOCKUP(void)
+{
+       unsigned int cpu, target;
+
+       cpus_read_lock();
+
+       cpu = get_cpu();
+       target = cpumask_any_but(cpu_online_mask, cpu);
+
+       if (target >= nr_cpu_ids) {
+               pr_err("FAIL: no other online CPUs\n");
+               goto out_put_cpus;
+       }
+
+       smp_call_function_single(target, __lkdtm_SMP_CALL_LOCKUP, NULL, 1);
+
+       pr_err("FAIL: did not hang\n");
+
+out_put_cpus:
+       put_cpu();
+       cpus_read_unlock();
+}
+
+static void lkdtm_SPINLOCKUP(void)
 {
        /* Must be called twice to trigger. */
        spin_lock(&lock_me_up);
@@ -188,16 +323,17 @@ void lkdtm_SPINLOCKUP(void)
        __release(&lock_me_up);
 }
 
-void lkdtm_HUNG_TASK(void)
+static void __noreturn lkdtm_HUNG_TASK(void)
 {
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule();
+       BUG();
 }
 
-volatile unsigned int huge = INT_MAX - 2;
-volatile unsigned int ignored;
+static volatile unsigned int huge = INT_MAX - 2;
+static volatile unsigned int ignored;
 
-void lkdtm_OVERFLOW_SIGNED(void)
+static void lkdtm_OVERFLOW_SIGNED(void)
 {
        int value;
 
@@ -212,7 +348,7 @@
 
 }
 
-void lkdtm_OVERFLOW_UNSIGNED(void)
+static void lkdtm_OVERFLOW_UNSIGNED(void)
 {
        unsigned int value;
 
@@ -226,11 +362,11 @@
        ignored = value;
 }
 
-/* Intentionally using old-style flex array definition of 1 byte. */
+/* Intentionally using unannotated flex array definition. */
 struct array_bounds_flex_array {
        int one;
        int two;
-       char data[1];
+       char data[];
 };
 
 struct array_bounds {
@@ -240,7 +376,7 @@
        int three;
 };
 
-void lkdtm_ARRAY_BOUNDS(void)
+static void lkdtm_ARRAY_BOUNDS(void)
 {
        struct array_bounds_flex_array *not_checked;
        struct array_bounds *checked;
@@ -248,6 +384,11 @@
 
        not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
        checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);
+       if (!not_checked || !checked) {
+               kfree(not_checked);
+               kfree(checked);
+               return;
+       }
 
        pr_info("Array access within bounds ...\n");
        /* For both, touch all bytes in the actual member size. */
@@ -257,7 +398,7 @@
         * For the uninstrumented flex array member, also touch 1 byte
         * beyond to verify it is correctly uninstrumented.
         */
-       for (i = 0; i < sizeof(not_checked->data) + 1; i++)
+       for (i = 0; i < 2; i++)
                not_checked->data[i] = 'A';
 
        pr_info("Array access beyond bounds ...\n");
@@ -267,9 +408,53 @@
        kfree(not_checked);
        kfree(checked);
        pr_err("FAIL: survived array bounds overflow!\n");
+       if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
+               pr_expected_config(CONFIG_UBSAN_TRAP);
+       else
+               pr_expected_config(CONFIG_UBSAN_BOUNDS);
+}
+
+struct lkdtm_annotated {
+       unsigned long flags;
+       int count;
+       int array[] __counted_by(count);
+};
+
+static volatile int fam_count = 4;
+
+static void lkdtm_FAM_BOUNDS(void)
+{
+       struct lkdtm_annotated *inst;
+
+       inst = kzalloc(struct_size(inst, array, fam_count + 1), GFP_KERNEL);
+       if (!inst) {
+               pr_err("FAIL: could not allocate test struct!\n");
+               return;
+       }
+
+       inst->count = fam_count;
+       pr_info("Array access within bounds ...\n");
+       inst->array[1] = fam_count;
+       ignored = inst->array[1];
+
+       pr_info("Array access beyond bounds ...\n");
+       inst->array[fam_count] = fam_count;
+       ignored = inst->array[fam_count];
+
+       kfree(inst);
+
+       pr_err("FAIL: survived access of invalid flexible array member index!\n");
+
+       if (!IS_ENABLED(CONFIG_CC_HAS_COUNTED_BY))
+               pr_warn("This is expected since this %s was built with a compiler that does not support __counted_by\n",
+                       lkdtm_kernel_info);
+       else if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
+               pr_expected_config(CONFIG_UBSAN_TRAP);
+       else
+               pr_expected_config(CONFIG_UBSAN_BOUNDS);
 }
 
-void lkdtm_CORRUPT_LIST_ADD(void)
+static void lkdtm_CORRUPT_LIST_ADD(void)
 {
        /*
         * Initially, an empty list via LIST_HEAD:
@@ -305,11 +490,11 @@
                pr_err("Overwrite did not happen, but no BUG?!\n");
        else {
                pr_err("list_add() corruption not detected!\n");
-               pr_expected_config(CONFIG_DEBUG_LIST);
+               pr_expected_config(CONFIG_LIST_HARDENED);
        }
 }
 
-void lkdtm_CORRUPT_LIST_DEL(void)
+static void lkdtm_CORRUPT_LIST_DEL(void)
 {
        LIST_HEAD(test_head);
        struct lkdtm_list item;
@@ -332,12 +517,12 @@
        pr_err("Overwrite did not happen, but no BUG?!\n");
        else {
                pr_err("list_del() corruption not detected!\n");
-               pr_expected_config(CONFIG_DEBUG_LIST);
+               pr_expected_config(CONFIG_LIST_HARDENED);
        }
 }
 
 /* Test that VMAP_STACK is actually allocating with a leading guard page */
-void lkdtm_STACK_GUARD_PAGE_LEADING(void)
+static void lkdtm_STACK_GUARD_PAGE_LEADING(void)
 {
        const unsigned char *stack = task_stack_page(current);
        const unsigned char *ptr = stack - 1;
@@ -351,7 +536,7 @@
 }
 
 /* Test that VMAP_STACK is actually allocating with a trailing guard page */
-void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
+static void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
 {
        const unsigned char *stack = task_stack_page(current);
        const unsigned char *ptr = stack + THREAD_SIZE;
@@ -364,7 +549,7 @@
        pr_err("FAIL: accessed page after stack! (byte: %x)\n", byte);
 }
 
-void lkdtm_UNSET_SMEP(void)
+static void lkdtm_UNSET_SMEP(void)
 {
 #if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
 #define MOV_CR4_DEPTH  64
@@ -399,6 +584,7 @@
         * the cr4 writing instruction.
         */
        insn = (unsigned char *)native_write_cr4;
+       OPTIMIZER_HIDE_VAR(insn);
       for (i = 0; i < MOV_CR4_DEPTH; i++) {
               /* mov %rdi, %cr4 */
               if (insn[i] == 0x0f && insn[i+1] == 0x22 && insn[i+2] == 0xe7)
@@ -430,7 +616,7 @@
 #endif
 }
 
-void lkdtm_DOUBLE_FAULT(void)
+static void lkdtm_DOUBLE_FAULT(void)
 {
 #if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
        /*
@@ -478,7 +664,7 @@ static noinline void change_pac_parameters(void)
 }
 #endif
 
-noinline void lkdtm_CORRUPT_PAC(void)
+static noinline void lkdtm_CORRUPT_PAC(void)
 {
 #ifdef CONFIG_ARM64
 #define CORRUPT_PAC_ITERATE    10
@@ -507,52 +693,39 @@ noinline void lkdtm_CORRUPT_PAC(void)
 #endif
 }
 
-void lkdtm_FORTIFY_OBJECT(void)
-{
-       struct target {
-               char a[10];
-       } target[2] = {};
-       int result;
-
-       /*
-        * Using volatile prevents the compiler from determining the value of
-        * 'size' at compile time. Without that, we would get a compile error
-        * rather than a runtime error.
-        */
-       volatile int size = 11;
-
-       pr_info("trying to read past the end of a struct\n");
-
-       result = memcmp(&target[0], &target[1], size);
-
-       /* Print result to prevent the code from being eliminated */
-       pr_err("FAIL: fortify did not catch an object overread!\n"
-              "\"%d\" was the memcmp result.\n", result);
-}
-
-void lkdtm_FORTIFY_SUBOBJECT(void)
-{
-       struct target {
-               char a[10];
-               char b[10];
-       } target;
-       char *src;
-
-       src = kmalloc(20, GFP_KERNEL);
-       strscpy(src, "over ten bytes", 20);
-
-       pr_info("trying to strcpy past the end of a member of a struct\n");
-
-       /*
-        * strncpy(target.a, src, 20); will hit a compile error because the
-        * compiler knows at build time that target.a < 20 bytes. Use strcpy()
-        * to force a runtime error.
-        */
-       strcpy(target.a, src);
-
-       /* Use target.a to prevent the code from being eliminated */
-       pr_err("FAIL: fortify did not catch an sub-object overrun!\n"
-              "\"%s\" was copied.\n", target.a);
+static struct crashtype crashtypes[] = {
+       CRASHTYPE(PANIC),
+       CRASHTYPE(PANIC_STOP_IRQOFF),
+       CRASHTYPE(BUG),
+       CRASHTYPE(WARNING),
+       CRASHTYPE(WARNING_MESSAGE),
+       CRASHTYPE(EXCEPTION),
+       CRASHTYPE(LOOP),
+       CRASHTYPE(EXHAUST_STACK),
+       CRASHTYPE(CORRUPT_STACK),
+       CRASHTYPE(CORRUPT_STACK_STRONG),
+       CRASHTYPE(REPORT_STACK),
+       CRASHTYPE(REPORT_STACK_CANARY),
+       CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
+       CRASHTYPE(SOFTLOCKUP),
+       CRASHTYPE(HARDLOCKUP),
+       CRASHTYPE(SMP_CALL_LOCKUP),
+       CRASHTYPE(SPINLOCKUP),
+       CRASHTYPE(HUNG_TASK),
+       CRASHTYPE(OVERFLOW_SIGNED),
+       CRASHTYPE(OVERFLOW_UNSIGNED),
+       CRASHTYPE(ARRAY_BOUNDS),
+       CRASHTYPE(FAM_BOUNDS),
+       CRASHTYPE(CORRUPT_LIST_ADD),
+       CRASHTYPE(CORRUPT_LIST_DEL),
+       CRASHTYPE(STACK_GUARD_PAGE_LEADING),
+       CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
+       CRASHTYPE(UNSET_SMEP),
+       CRASHTYPE(DOUBLE_FAULT),
+       CRASHTYPE(CORRUPT_PAC),
+};
 
-       kfree(src);
-}
+struct crashtype_category bugs_crashtypes = {
+       .crashtypes = crashtypes,
+       .len = ARRAY_SIZE(crashtypes),
+};
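A note on the REC_STACK_SIZE / REC_NUM_DEFAULT hunk near the top: the sizing guarantees that EXHAUST_STACK recurses through roughly twice the kernel stack. A minimal userspace sketch of the arithmetic, assuming a 16 KiB THREAD_SIZE and CONFIG_FRAME_WARN=2048 (both values are architecture- and config-dependent, and are assumptions here):

    /* Worked example of the EXHAUST_STACK sizing; all values are assumed. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long thread_size = 16384; /* THREAD_SIZE, assumed 16 KiB */
            unsigned long frame_warn = 2048;   /* CONFIG_FRAME_WARN, assumed */
            unsigned long rec_stack_size = frame_warn / 2;
            unsigned long rec_num_default = (thread_size / rec_stack_size) * 2;

            /* 1024-byte frames to depth 32: ~32 KiB, twice the stack. */
            printf("frame=%lu depth=%lu total=%lu\n", rec_stack_size,
                   rec_num_default, rec_stack_size * rec_num_default);
            return 0;
    }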
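The new FAM_BOUNDS test depends on the __counted_by() annotation: with CONFIG_UBSAN_BOUNDS, flexible-array indexing is checked against the annotated count member rather than the allocation size, which is why the test allocates room for fam_count + 1 elements but annotates only fam_count. A rough userspace analogue, assuming a compiler that implements the counted_by attribute (recent clang or gcc) and a build with -fsanitize=bounds; all names below are illustrative, not kernel API:

    /* Hedged analogue of FAM_BOUNDS: trap on the annotated bound. */
    #include <stdlib.h>

    #ifndef __has_attribute
    #define __has_attribute(x) 0
    #endif

    #if __has_attribute(counted_by)
    #define __counted_by(member) __attribute__((counted_by(member)))
    #else
    #define __counted_by(member) /* no-op without compiler support */
    #endif

    struct annotated {
            int count;
            int array[] __counted_by(count);
    };

    int main(void)
    {
            /* Allocate room for 5 elements but annotate only 4. */
            struct annotated *inst = malloc(sizeof(*inst) + 5 * sizeof(int));

            if (!inst)
                    return 1;
            inst->count = 4;
            inst->array[1] = 1;           /* within the annotated bound */
            inst->array[inst->count] = 1; /* past it: traps under -fsanitize=bounds */
            free(inst);
            return 0;
    }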
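Finally, none of these handlers is called directly: the CRASHTYPE() table at the end of the diff registers each one with lkdtm's debugfs interface, and a crashtype fires when its name is written to the provoke-crash/DIRECT file. A minimal usage sketch, assuming CONFIG_LKDTM is enabled and debugfs is mounted at the usual /sys/kernel/debug; most of these tests will panic, oops, or hang the machine, so the write may never return:

    /* Trigger an lkdtm crashtype by name; run as root on a test machine. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            const char *type = (argc > 1) ? argv[1] : "WARNING";
            int fd = open("/sys/kernel/debug/provoke-crash/DIRECT", O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, type, strlen(type)) < 0)
                    perror("write");
            close(fd);
            return 0;
    }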
