From 61462c8a6b140fe2f93cb911684837e05950e680 Mon Sep 17 00:00:00 2001
From: Kees Cook
Date: Tue, 10 May 2016 10:55:49 -0700
Subject: arm64: kernel: Fix incorrect brk randomization

This fixes two issues with the arm64 brk randomization. First,
STACK_RND_MASK was being used incorrectly. The original code was:

	unsigned long range_end = base + (STACK_RND_MASK << PAGE_SHIFT) + 1;

STACK_RND_MASK is 0x7ff (32-bit) or 0x3ffff (64-bit); with 4K pages,
PAGE_SHIFT is 12:

	#define STACK_RND_MASK	(test_thread_flag(TIF_32BIT) ? \
					0x7ff >> (PAGE_SHIFT - 12) : \
					0x3ffff >> (PAGE_SHIFT - 12))

This means the resulting offset from base would be 0x7ff001 or
0x3ffff001, which is wrong since it creates an unaligned end address.
It was likely intended to be:

	unsigned long range_end = base + ((STACK_RND_MASK + 1) << PAGE_SHIFT);

which would result in offsets of 0x800000 (32-bit) and 0x40000000
(64-bit).

However, even this corrected 32-bit compat offset (0x00800000) is much
smaller than native ARM's brk randomization value (0x02000000):

	unsigned long arch_randomize_brk(struct mm_struct *mm)
	{
		unsigned long range_end = mm->brk + 0x02000000;
		return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
	}

So, instead of basing arm64's brk randomization on mistaken
STACK_RND_MASK calculations, just use specific corrected values for
compat (0x02000000) and native arm64 (0x40000000).

Reviewed-by: Jon Medhurst
Signed-off-by: Kees Cook
[will: use is_compat_task() as suggested by tixy]
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/process.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 80624829db61..ad4a7e132ead 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -382,13 +382,14 @@ unsigned long arch_align_stack(unsigned long sp)
 	return sp & ~0xf;
 }
 
-static unsigned long randomize_base(unsigned long base)
-{
-	unsigned long range_end = base + (STACK_RND_MASK << PAGE_SHIFT) + 1;
-	return randomize_range(base, range_end, 0) ? : base;
-}
-
 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
-	return randomize_base(mm->brk);
+	unsigned long range_end = mm->brk;
+
+	if (is_compat_task())
+		range_end += 0x02000000;
+	else
+		range_end += 0x40000000;
+
+	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
 }
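[Editor's note: to make the offset arithmetic above concrete, here is a
minimal standalone sketch, not kernel code; PAGE_SHIFT and the mask
values are hard-coded for the 4K-page case quoted in the message.]

	/*
	 * Reproduces the old vs. intended offset arithmetic with the
	 * 4K-page STACK_RND_MASK values quoted above.
	 */
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define MASK_COMPAT	0x7ffUL		/* 32-bit STACK_RND_MASK */
	#define MASK_NATIVE	0x3ffffUL	/* 64-bit STACK_RND_MASK */

	int main(void)
	{
		/* Original expression: offsets end up page-unaligned. */
		printf("old: compat=%#lx native=%#lx\n",
		       (MASK_COMPAT << PAGE_SHIFT) + 1,
		       (MASK_NATIVE << PAGE_SHIFT) + 1);

		/* Likely intended expression: page-aligned ranges. */
		printf("fix: compat=%#lx native=%#lx\n",
		       (MASK_COMPAT + 1) << PAGE_SHIFT,
		       (MASK_NATIVE + 1) << PAGE_SHIFT);
		return 0;
	}

On a 64-bit host this prints old: compat=0x7ff001 native=0x3ffff001
and fix: compat=0x800000 native=0x40000000, matching the offsets
discussed in the commit message.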
From e6d9a52543338603e25e71e0e4942f05dae0dd8a Mon Sep 17 00:00:00 2001
From: Colin Ian King
Date: Wed, 11 May 2016 17:56:54 +0100
Subject: arm64: do not enforce strict 16-byte alignment of the stack pointer

copy_thread should not be enforcing 16-byte alignment and returning
-EINVAL. Other architectures trap misaligned stack accesses with
SIGBUS, so arm64 should follow this convention; remove the strict
enforcement check.

For example, clone(2) currently fails with -EINVAL when passed a
misaligned stack, which gives little clue as to what is wrong. It is
arguable that a SIGBUS on the first access to a misaligned stack makes
it far easier to identify the misalignment than an unconventional (and
undocumented) -EINVAL.

Acked-by: Catalin Marinas
Signed-off-by: Colin Ian King
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/process.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index ad4a7e132ead..48eea6866c67 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -265,9 +265,6 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 		if (stack_start) {
 			if (is_compat_thread(task_thread_info(p)))
 				childregs->compat_sp = stack_start;
-			/* 16-byte aligned stack mandatory on AArch64 */
-			else if (stack_start & 15)
-				return -EINVAL;
 			else
 				childregs->sp = stack_start;
 		}
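[Editor's note: a minimal user-space sketch of the behaviour change,
not part of the patch. Assumptions: the raw clone syscall with arm64's
argument order (the glibc clone() wrapper is bypassed since it may fix
up alignment itself), a 64 KiB malloc'd child stack; the pre/post-patch
outcomes in the comments follow the commit message.]

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <errno.h>
	#include <signal.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	int main(void)
	{
		char *stack = malloc(64 * 1024);
		if (!stack)
			return 1;

		/* Deliberately violate the 16-byte alignment of the new
		 * stack pointer. */
		unsigned long sp = (unsigned long)(stack + 64 * 1024) - 8;

		/* arm64 argument order: flags, newsp, ptid, tls, ctid. */
		long ret = syscall(SYS_clone, SIGCHLD, sp, NULL, NULL, NULL);
		if (ret < 0)
			/* Pre-patch arm64: fails here with EINVAL. */
			printf("clone: %s\n", strerror(errno));
		else if (ret > 0)
			printf("parent: child pid %ld\n", ret);
		/* ret == 0: the child is now running on the misaligned
		 * stack; post-patch, its first stack access raises SIGBUS
		 * instead of clone() failing up front. */
		return 0;
	}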