From 15cd0e675f3f76b4d21c313795fe0c23df0ee20f Mon Sep 17 00:00:00 2001
From: Amit Daniel Kachhap
Date: Mon, 30 Mar 2020 17:11:39 +0530
Subject: arm64: Kconfig: ptrauth: Add binutils version check to fix mismatch

The recent addition of ARM64_PTR_AUTH exposed a mismatch issue with
binutils. gcc versions 9.1 and later insert a .note.gnu.property
section note, but this is only handled properly by binutils version
2.33.1 and later. If an older binutils is used, the following warnings
are generated:

  aarch64-linux-ld: warning: arch/arm64/kernel/vdso/vgettimeofday.o: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0000000
  aarch64-linux-objdump: warning: arch/arm64/lib/csum.o: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0000000
  aarch64-linux-nm: warning: .tmp_vmlinux1: unsupported GNU_PROPERTY_TYPE (5) type: 0xc0000000

This patch enables ARM64_PTR_AUTH only when the gcc and binutils
versions are compatible with each other. Older gcc versions which do
not insert such a section continue to work as before.

This scenario should not occur with clang, as the recent commit
3b446c7d27ddd06 ("arm64: Kconfig: verify binutils support for
ARM64_PTR_AUTH") already masks out binutils versions older than 2.34.

Reported-by: kbuild test robot
Suggested-by: Vincenzo Frascino
Signed-off-by: Amit Daniel Kachhap
[catalin.marinas@arm.com: slight adjustment to the comment]
Signed-off-by: Catalin Marinas
---
 arch/arm64/Kconfig | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index e6712b6818fa..9ba3287bb29d 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1503,7 +1503,10 @@ config ARM64_PTR_AUTH
 	default y
 	depends on !KVM || ARM64_VHE
 	depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
-	depends on CC_IS_GCC || (CC_IS_CLANG && AS_HAS_CFI_NEGATE_RA_STATE)
+	# GCC 9.1 and later inserts a .note.gnu.property section note for PAC
+	# which is only understood by binutils starting with version 2.33.1.
+	depends on !CC_IS_GCC || GCC_VERSION < 90100 || LD_VERSION >= 233010000
+	depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE
 	depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
 	help
 	  Pointer authentication (part of the ARMv8.3 Extensions) provides
-- cgit
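A note on the magic numbers in the new dependency line: they follow the
kernel's numeric toolchain encodings at the time, GCC_VERSION = major*10000 +
minor*100 + patch (so 9.1.0 becomes 90100) and, for binutils, LD_VERSION =
major*100000000 + minor*1000000 + patch*10000 (so 2.33.1 becomes 233010000).
The snippet below is a stand-alone userspace sketch of that check, purely
illustrative and not part of the patch; the helper name is invented.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors: depends on !CC_IS_GCC || GCC_VERSION < 90100 || LD_VERSION >= 233010000 */
static bool ptr_auth_toolchain_ok(bool cc_is_gcc, int gcc_version, int ld_version)
{
	return !cc_is_gcc || gcc_version < 90100 || ld_version >= 233010000;
}

int main(void)
{
	/* GCC 9.3.0 + binutils 2.32: property note emitted, linker too old -> 0 */
	printf("%d\n", ptr_auth_toolchain_ok(true, 90300, 232000000));
	/* GCC 8.4.0 + binutils 2.32: no property note emitted -> 1 */
	printf("%d\n", ptr_auth_toolchain_ok(true, 80400, 232000000));
	/* GCC 9.3.0 + binutils 2.34 -> 1 */
	printf("%d\n", ptr_auth_toolchain_ok(true, 90300, 234000000));
	return 0;
}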
From b8fdef311a0bd9223f10754f94fdcf1a594a3457 Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Tue, 31 Mar 2020 20:44:59 +0100
Subject: arm64: Always force a branch protection mode when the compiler has one

Compilers with branch protection support can be configured to enable it
by default, and it is likely that distributions will do this as part of
deploying branch protection system wide. As well as the slight overhead
from having some extra NOPs for unused branch protection features, this
can cause more serious problems when the kernel is providing pointer
authentication to userspace but is not built for pointer authentication
itself. In that case our switching of keys for userspace can affect the
kernel unexpectedly, causing pointer authentication instructions in the
kernel to corrupt addresses.

To ensure that we get consistent and reliable behaviour, always
explicitly initialise the branch protection mode, ensuring that the
kernel is built the same way regardless of the compiler defaults.

Fixes: 7503197562567 ("arm64: add basic pointer authentication support")
Reported-by: Szabolcs Nagy
Signed-off-by: Mark Brown
Cc: stable@vger.kernel.org
[catalin.marinas@arm.com: remove Kconfig option in favour of Makefile check]
Signed-off-by: Catalin Marinas
---
 arch/arm64/Makefile | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index f15f92ba53e6..85e4149cc5d5 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -65,6 +65,10 @@ stack_protector_prepare: prepare0
 					include/generated/asm-offsets.h))
 endif

+# Ensure that if the compiler supports branch protection we default it
+# off, this will be overridden if we are using branch protection.
+branch-prot-flags-y += $(call cc-option,-mbranch-protection=none)
+
 ifeq ($(CONFIG_ARM64_PTR_AUTH),y)
 branch-prot-flags-$(CONFIG_CC_HAS_SIGN_RETURN_ADDRESS) := -msign-return-address=all
 branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=pac-ret+leaf
@@ -73,9 +77,10 @@ branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=pa
 # we pass it only to the assembler. This option is utilized only in case of non
 # integrated assemblers.
 branch-prot-flags-$(CONFIG_AS_HAS_PAC) += -Wa,-march=armv8.3-a
-KBUILD_CFLAGS += $(branch-prot-flags-y)
 endif

+KBUILD_CFLAGS += $(branch-prot-flags-y)
+
 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
 KBUILD_CPPFLAGS	+= -mbig-endian
 CHECKFLAGS	+= -D__AARCH64EB__
-- cgit
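The net effect of the Makefile change is that exactly one branch-protection
option always reaches KBUILD_CFLAGS: the explicit "none" default, unless
CONFIG_ARM64_PTR_AUTH overrides it through the ':=' assignments. A small,
hypothetical C mock-up of that selection order follows (the function and
parameter names are invented for illustration; the real selection happens in
make, not C):

#include <stdbool.h>
#include <stdio.h>

/*
 * Sketch of the flag selection performed by the Makefile hunk above.
 * The boolean parameters stand in for the corresponding CONFIG_* symbols.
 */
static const char *branch_prot_cflag(bool ptr_auth, bool has_sign_ra,
				     bool has_pac_ret)
{
	/*
	 * branch-prot-flags-y starts out as an explicit "none" so a compiler
	 * that enables branch protection by default is switched off for the
	 * kernel build.
	 */
	const char *flag = "-mbranch-protection=none";

	if (ptr_auth) {
		/*
		 * The ':=' assignments overwrite the default when the
		 * matching CONFIG_CC_HAS_* symbol is y; the pac-ret+leaf
		 * form wins when the compiler supports it.
		 */
		if (has_sign_ra)
			flag = "-msign-return-address=all";
		if (has_pac_ret)
			flag = "-mbranch-protection=pac-ret+leaf";
	}
	return flag;
}

int main(void)
{
	printf("%s\n", branch_prot_cflag(false, true, true)); /* none */
	printf("%s\n", branch_prot_cflag(true, true, true));  /* pac-ret+leaf */
	return 0;
}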
From e16e65a02913d29a7b27c4e3a415ceec967b0629 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Sun, 29 Mar 2020 16:12:58 +0200
Subject: arm64: remove CONFIG_DEBUG_ALIGN_RODATA feature

When CONFIG_DEBUG_ALIGN_RODATA is enabled, kernel segments mapped with
different permissions (r-x for .text, r-- for .rodata, rw- for .data,
etc) are rounded up to 2 MiB so they can be mapped more efficiently.
In particular, it permits the segments to be mapped using level 2
block entries when using 4k pages, which is expected to result in less
TLB pressure.

However, the mappings for the bulk of the kernel will use level 2
entries anyway, and the misaligned fringes are organized such that they
can take advantage of the contiguous bit, and use far fewer level 3
entries than would be needed otherwise.

This makes the value of this feature dubious at best, and since it is
not enabled in defconfig or in the distro configs, it does not appear
to be in wide use either. So let's just remove it.

Signed-off-by: Ard Biesheuvel
Acked-by: Mark Rutland
Acked-by: Will Deacon
Acked-by: Laura Abbott
Signed-off-by: Catalin Marinas
---
 arch/arm64/Kconfig.debug        | 13 -------------
 arch/arm64/include/asm/memory.h | 12 +-----------
 2 files changed, 1 insertion(+), 24 deletions(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 1c906d932d6b..a1efa246c9ed 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -52,19 +52,6 @@ config DEBUG_WX

 	  If in doubt, say "Y".

-config DEBUG_ALIGN_RODATA
-	depends on STRICT_KERNEL_RWX
-	bool "Align linker sections up to SECTION_SIZE"
-	help
-	  If this option is enabled, sections that may potentially be marked as
-	  read only or non-executable will be aligned up to the section size of
-	  the kernel. This prevents sections from being split into pages and
-	  avoids a potential TLB penalty. The downside is an increase in
-	  alignment and potentially wasted space. Turn on this option if
-	  performance is more important than memory pressure.
-
-	  If in doubt, say N.
-
 config DEBUG_EFI
 	depends on EFI && DEBUG_INFO
 	bool "UEFI debugging"
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 2be67b232499..a1871bb32bb1 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -120,22 +120,12 @@

 /*
  * Alignment of kernel segments (e.g. .text, .data).
- */
-#if defined(CONFIG_DEBUG_ALIGN_RODATA)
-/*
- *  4 KB granule:   1 level 2 entry
- * 16 KB granule: 128 level 3 entries, with contiguous bit
- * 64 KB granule:  32 level 3 entries, with contiguous bit
- */
-#define SEGMENT_ALIGN		SZ_2M
-#else
-/*
+ *
 *  4 KB granule:  16 level 3 entries, with contiguous bit
 * 16 KB granule:   4 level 3 entries, without contiguous bit
 * 64 KB granule:   1 level 3 entry
 */
 #define SEGMENT_ALIGN		SZ_64K
-#endif

 /*
  * Memory types available.
-- cgit

From fc2266011accd5aeb8ebc335c381991f20e26e33 Mon Sep 17 00:00:00 2001
From: Fredrik Strupe
Date: Wed, 8 Apr 2020 13:29:41 +0200
Subject: arm64: armv8_deprecated: Fix undef_hook mask for thumb setend

For thumb instructions, call_undef_hook() in traps.c first reads a u16,
and if the u16 indicates a T32 instruction (u16 >= 0xe800), a second
u16 is read, which then makes up the lower half-word of a T32
instruction. For T16 instructions, the second u16 is not read, which
makes the resulting u32 opcode always have the upper half set to 0.

However, having the upper half of instr_mask in the undef_hook set to 0
masks out the upper half of all thumb instructions - both T16 and T32.
This results in trapped T32 instructions with the lower half-word equal
to the T16 encoding of setend (b650) being matched, even though the
upper half-word is not 0000 and thus indicates a T32 opcode.

An example of such a T32 instruction is eaa0b650, which should raise a
SIGILL since T32 instructions with an eaa prefix are unallocated as per
the Arm ARM, but instead works as a SETEND because the second half-word
is set to b650.

This patch fixes the issue by extending instr_mask to include the upper
u32 half, which will still match T16 instructions where the upper half
is 0, but not T32 instructions.

Fixes: 2d888f48e056 ("arm64: Emulate SETEND for AArch32 tasks")
Cc: stable@vger.kernel.org # 4.0.x-
Reviewed-by: Suzuki K Poulose
Signed-off-by: Fredrik Strupe
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/armv8_deprecated.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm64')

diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 4cc581af2d96..c19aa81ddc8c 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -601,7 +601,7 @@ static struct undef_hook setend_hooks[] = {
 	},
 	{
 		/* Thumb mode */
-		.instr_mask	= 0x0000fff7,
+		.instr_mask	= 0xfffffff7,
 		.instr_val	= 0x0000b650,
 		.pstate_mask	= (PSR_AA32_T_BIT | PSR_AA32_MODE_MASK),
 		.pstate_val	= (PSR_AA32_T_BIT | PSR_AA32_MODE_USR),
-- cgit
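To see why widening instr_mask fixes the false match, recall that an
undef_hook matches when (opcode & instr_mask) == instr_val. The stand-alone
C demonstration below is a userspace sketch, not kernel code; the T16 SETEND
LE encoding 0xb650 comes from the commit above, and 0xb658 is assumed to be
the BE variant (the E bit is the one excluded by the ...fff7 mask).

#include <stdint.h>
#include <stdio.h>

static int hook_matches(uint32_t opcode, uint32_t mask, uint32_t val)
{
	return (opcode & mask) == val;
}

int main(void)
{
	const uint32_t t16_setend_le = 0x0000b650; /* SETEND LE (T16) */
	const uint32_t t16_setend_be = 0x0000b658; /* SETEND BE (T16), assumed encoding */
	const uint32_t t32_unalloc   = 0xeaa0b650; /* unallocated T32, low half b650 */
	const uint32_t val = 0x0000b650;

	printf("old mask 0x0000fff7: T16 LE %d, T16 BE %d, T32 %d\n",
	       hook_matches(t16_setend_le, 0x0000fff7, val),
	       hook_matches(t16_setend_be, 0x0000fff7, val),
	       hook_matches(t32_unalloc, 0x0000fff7, val));  /* 1, 1, 1 <- bug */
	printf("new mask 0xfffffff7: T16 LE %d, T16 BE %d, T32 %d\n",
	       hook_matches(t16_setend_le, 0xfffffff7, val),
	       hook_matches(t16_setend_be, 0xfffffff7, val),
	       hook_matches(t32_unalloc, 0xfffffff7, val));  /* 1, 1, 0 <- fixed */
	return 0;
}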