Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/.gitignore | 1
-rw-r--r--  arch/arm/kernel/Makefile | 43
-rw-r--r--  arch/arm/kernel/arch_timer.c | 6
-rw-r--r--  arch/arm/kernel/armksyms.c | 12
-rw-r--r--  arch/arm/kernel/asm-offsets.c | 83
-rw-r--r--  arch/arm/kernel/atags.h | 7
-rw-r--r--  arch/arm/kernel/atags_compat.c | 5
-rw-r--r--  arch/arm/kernel/atags_parse.c | 45
-rw-r--r--  arch/arm/kernel/atags_proc.c | 17
-rw-r--r--  arch/arm/kernel/bios32.c | 57
-rw-r--r--  arch/arm/kernel/bugs.c | 19
-rw-r--r--  arch/arm/kernel/cacheinfo.c | 173
-rw-r--r--  arch/arm/kernel/cpuidle.c | 19
-rw-r--r--  arch/arm/kernel/crash_dump.c | 32
-rw-r--r--  arch/arm/kernel/debug.S | 59
-rw-r--r--  arch/arm/kernel/devtree.c | 47
-rw-r--r--  arch/arm/kernel/dma-isa.c | 222
-rw-r--r--  arch/arm/kernel/dma.c | 19
-rw-r--r--  arch/arm/kernel/early_printk.c | 21
-rw-r--r--  arch/arm/kernel/efi.c | 67
-rw-r--r--  arch/arm/kernel/elf.c | 52
-rw-r--r--  arch/arm/kernel/entry-armv.S | 653
-rw-r--r--  arch/arm/kernel/entry-common.S | 188
-rw-r--r--  arch/arm/kernel/entry-ftrace.S | 227
-rw-r--r--  arch/arm/kernel/entry-header.S | 110
-rw-r--r--  arch/arm/kernel/entry-v7m.S | 54
-rw-r--r--  arch/arm/kernel/fiq.c | 6
-rw-r--r--  arch/arm/kernel/ftrace.c | 216
-rw-r--r--  arch/arm/kernel/head-common.S | 130
-rw-r--r--  arch/arm/kernel/head-inflate-data.c | 56
-rw-r--r--  arch/arm/kernel/head-nommu.S | 392
-rw-r--r--  arch/arm/kernel/head.S | 318
-rw-r--r--  arch/arm/kernel/head.h | 7
-rw-r--r--  arch/arm/kernel/hibernate.c | 5
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c | 293
-rw-r--r--  arch/arm/kernel/hyp-stub.S | 101
-rw-r--r--  arch/arm/kernel/insn.c | 20
-rw-r--r--  arch/arm/kernel/io.c | 1
-rw-r--r--  arch/arm/kernel/irq.c | 150
-rw-r--r--  arch/arm/kernel/isa.c | 28
-rw-r--r--  arch/arm/kernel/iwmmxt.S | 164
-rw-r--r--  arch/arm/kernel/iwmmxt.h | 47
-rw-r--r--  arch/arm/kernel/jump_label.c | 13
-rw-r--r--  arch/arm/kernel/kgdb.c | 53
-rw-r--r--  arch/arm/kernel/machine_kexec.c | 96
-rw-r--r--  arch/arm/kernel/module-plts.c | 72
-rw-r--r--  arch/arm/kernel/module.c | 250
-rw-r--r--  arch/arm/kernel/module.lds | 4
-rw-r--r--  arch/arm/kernel/opcodes.c | 5
-rw-r--r--  arch/arm/kernel/paravirt.c | 18
-rw-r--r--  arch/arm/kernel/patch.c | 28
-rw-r--r--  arch/arm/kernel/perf_callchain.c | 48
-rw-r--r--  arch/arm/kernel/perf_event_v6.c | 585
-rw-r--r--  arch/arm/kernel/perf_event_v7.c | 2038
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c | 771
-rw-r--r--  arch/arm/kernel/perf_regs.c | 4
-rw-r--r--  arch/arm/kernel/phys2virt.S | 238
-rw-r--r--  arch/arm/kernel/pj4-cp0.c | 137
-rw-r--r--  arch/arm/kernel/process.c | 115
-rw-r--r--  arch/arm/kernel/psci_smp.c | 22
-rw-r--r--  arch/arm/kernel/ptrace.c | 165
-rw-r--r--  arch/arm/kernel/reboot.c | 20
-rw-r--r--  arch/arm/kernel/reboot.h | 1
-rw-r--r--  arch/arm/kernel/relocate_kernel.S | 47
-rw-r--r--  arch/arm/kernel/return_address.c | 25
-rw-r--r--  arch/arm/kernel/setup.c | 242
-rw-r--r--  arch/arm/kernel/signal.c | 328
-rw-r--r--  arch/arm/kernel/signal.h | 13
-rw-r--r--  arch/arm/kernel/sigreturn_codes.S | 66
-rw-r--r--  arch/arm/kernel/sleep.S | 50
-rw-r--r--  arch/arm/kernel/smccc-call.S | 22
-rw-r--r--  arch/arm/kernel/smp.c | 331
-rw-r--r--  arch/arm/kernel/smp_scu.c | 48
-rw-r--r--  arch/arm/kernel/smp_tlb.c | 5
-rw-r--r--  arch/arm/kernel/smp_twd.c | 72
-rw-r--r--  arch/arm/kernel/spectre.c | 71
-rw-r--r--  arch/arm/kernel/stacktrace.c | 223
-rw-r--r--  arch/arm/kernel/suspend.c | 35
-rw-r--r--  arch/arm/kernel/swp_emulate.c | 42
-rw-r--r--  arch/arm/kernel/sys_arm.c | 8
-rw-r--r--  arch/arm/kernel/sys_oabi-compat.c | 238
-rw-r--r--  arch/arm/kernel/tcm.c | 9
-rw-r--r--  arch/arm/kernel/thumbee.c | 14
-rw-r--r--  arch/arm/kernel/time.c | 38
-rw-r--r--  arch/arm/kernel/topology.c | 109
-rw-r--r--  arch/arm/kernel/traps.c | 357
-rw-r--r--  arch/arm/kernel/unwind.c | 147
-rw-r--r--  arch/arm/kernel/v7m.c | 5
-rw-r--r--  arch/arm/kernel/vdso.c | 172
-rw-r--r--  arch/arm/kernel/vmcore_info.c | 10
-rw-r--r--  arch/arm/kernel/vmlinux-xip.lds.S | 292
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S | 253
-rw-r--r--  arch/arm/kernel/xscale-cp0.c | 6
93 files changed, 4495 insertions(+), 7638 deletions(-)
diff --git a/arch/arm/kernel/.gitignore b/arch/arm/kernel/.gitignore
index c5f676c3c224..bbb90f92d051 100644
--- a/arch/arm/kernel/.gitignore
+++ b/arch/arm/kernel/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
vmlinux.lds
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index ad325a8c7e1e..afc9de7ef9a1 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the linux kernel.
#
@@ -9,6 +10,7 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_insn.o = -pg
CFLAGS_REMOVE_patch.o = -pg
+CFLAGS_REMOVE_unwind.o = -pg
endif
CFLAGS_REMOVE_return_address.o = -pg
@@ -16,10 +18,17 @@ CFLAGS_REMOVE_return_address.o = -pg
# Object file lists.
obj-y := elf.o entry-common.o irq.o opcodes.o \
- process.o ptrace.o reboot.o return_address.o \
+ process.o ptrace.o reboot.o io.o \
setup.o signal.o sigreturn_codes.o \
stacktrace.o sys_arm.o time.o traps.o
+KASAN_SANITIZE_stacktrace.o := n
+KASAN_SANITIZE_traps.o := n
+
+ifneq ($(CONFIG_ARM_UNWIND),y)
+obj-$(CONFIG_FRAME_POINTER) += return_address.o
+endif
+
obj-$(CONFIG_ATAGS) += atags_parse.o
obj-$(CONFIG_ATAGS_PROC) += atags_proc.o
obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o
@@ -30,12 +39,13 @@ else
obj-y += entry-armv.o
endif
+obj-$(CONFIG_MMU) += bugs.o
+obj-$(CONFIG_OF) += cacheinfo.o
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_ISA_DMA_API) += dma.o
obj-$(CONFIG_FIQ) += fiq.o fiqasm.o
obj-$(CONFIG_MODULES) += armksyms.o module.o
obj-$(CONFIG_ARM_MODULE_PLTS) += module-plts.o
-obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o
obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o
obj-$(CONFIG_HIBERNATION) += hibernate.o
@@ -47,10 +57,11 @@ obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arch_timer.o
obj-$(CONFIG_FUNCTION_TRACER) += entry-ftrace.o
-obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o insn.o
-obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o insn.o
+obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o insn.o patch.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o insn.o patch.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o insn.o patch.o
-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_VMCORE_INFO) += vmcore_info.o
# Main staffs in KPROBES are in arch/arm/probes/ .
obj-$(CONFIG_KPROBES) += patch.o insn.o
obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
@@ -61,38 +72,36 @@ obj-$(CONFIG_HAVE_TCM) += tcm.o
obj-$(CONFIG_OF) += devtree.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_SWP_EMULATE) += swp_emulate.o
-CFLAGS_swp_emulate.o := -Wa,-march=armv7-a
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_CPU_XSCALE) += xscale-cp0.o
obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o
obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o
-obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o
-obj-$(CONFIG_CPU_PJ4B) += pj4-cp0.o
obj-$(CONFIG_IWMMXT) += iwmmxt.o
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_xscale.o perf_event_v6.o \
- perf_event_v7.o
AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o
obj-$(CONFIG_VDSO) += vdso.o
obj-$(CONFIG_EFI) += efi.o
-
-ifneq ($(CONFIG_ARCH_EBSA110),y)
- obj-y += io.o
-endif
obj-$(CONFIG_PARAVIRT) += paravirt.o
-head-y := head$(MMUEXT).o
+obj-y += head$(MMUEXT).o
obj-$(CONFIG_DEBUG_LL) += debug.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+obj-$(CONFIG_ARM_PATCH_PHYS_VIRT) += phys2virt.o
+
+# This is executed very early using a temporary stack when no memory allocator
+# nor global data is available. Everything has to be allocated on the stack.
+CFLAGS_head-inflate-data.o := $(call cc-option,-Wframe-larger-than=10240)
+obj-$(CONFIG_XIP_DEFLATED_DATA) += head-inflate-data.o
obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o
-AFLAGS_hyp-stub.o :=-Wa,-march=armv7-a
ifeq ($(CONFIG_ARM_PSCI),y)
obj-$(CONFIG_SMP) += psci_smp.o
endif
obj-$(CONFIG_HAVE_ARM_SMCCC) += smccc-call.o
-extra-y := $(head-y) vmlinux.lds
+obj-$(CONFIG_GENERIC_CPU_VULNERABILITIES) += spectre.o
+
+always-$(KBUILD_BUILTIN) := vmlinux.lds
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index 1791f12c180b..b5e217907686 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -1,18 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/kernel/arch_timer.c
*
* Copyright (C) 2011 ARM Ltd.
* All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/delay.h>
+#include <asm/arch_timer.h>
#include <clocksource/arm_arch_timer.h>
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 8e8d20cdbce7..82e96ac83684 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -1,16 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/kernel/armksyms.c
*
* Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/string.h>
-#include <linux/cryptohash.h>
#include <linux/delay.h>
#include <linux/in6.h>
#include <linux/syscalls.h>
@@ -87,10 +83,11 @@ EXPORT_SYMBOL(__raw_writesl);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(__memset32);
+EXPORT_SYMBOL(__memset64);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memchr);
-EXPORT_SYMBOL(__memzero);
EXPORT_SYMBOL(mmioset);
EXPORT_SYMBOL(mmiocpy);
@@ -166,9 +163,6 @@ EXPORT_SYMBOL(_find_next_bit_be);
#endif
#ifdef CONFIG_FUNCTION_TRACER
-#ifdef CONFIG_OLD_MCOUNT
-EXPORT_SYMBOL(mcount);
-#endif
EXPORT_SYMBOL(__gnu_mcount_nc);
#endif
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 608008229c7d..2101938d27fc 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 1995-2003 Russell King
* 2001-2002 Keith Owens
@@ -5,29 +6,30 @@
* Generate definitions needed by assembly language modules.
* This code generates raw asm output which is post-processed to extract
* and format the required data.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
+#define COMPILE_OFFSETS
+
#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
-#ifdef CONFIG_KVM_ARM_HOST
-#include <linux/kvm_host.h>
-#endif
#include <asm/cacheflush.h>
+#include <asm/kexec-internal.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/mach/arch.h>
#include <asm/thread_info.h>
-#include <asm/memory.h>
+#include <asm/page.h>
+#include <asm/mpu.h>
#include <asm/procinfo.h>
#include <asm/suspend.h>
-#include <asm/vdso_datapage.h>
#include <asm/hardware/cache-l2x0.h>
#include <linux/kbuild.h>
+#include <linux/arm-smccc.h>
+
+#include <vdso/datapage.h>
+
+#include "signal.h"
/*
* Make sure that the compiler and target are compatible.
@@ -35,42 +37,20 @@
#if defined(__APCS_26__)
#error Sorry, your compiler targets APCS-26 but this kernel requires APCS-32
#endif
-/*
- * GCC 3.0, 3.1: general bad code generation.
- * GCC 3.2.0: incorrect function argument offset calculation.
- * GCC 3.2.x: miscompiles NEW_AUX_ENT in fs/binfmt_elf.c
- * (http://gcc.gnu.org/PR8896) and incorrect structure
- * initialisation in fs/jffs2/erase.c
- * GCC 4.8.0-4.8.2: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58854
- * miscompiles find_get_entry(), and can result in EXT3 and EXT4
- * filesystem corruption (possibly other FS too).
- */
-#ifdef __GNUC__
-#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
-#error Your compiler is too buggy; it is known to miscompile kernels.
-#error Known good compilers: 3.3, 4.x
-#endif
-#if GCC_VERSION >= 40800 && GCC_VERSION < 40803
-#error Your compiler is too buggy; it is known to miscompile kernels
-#error and result in filesystem corruption and oopses.
-#endif
-#endif
int main(void)
{
DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
#endif
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
- DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
- DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_CPU_DOMAIN, offsetof(struct thread_info, cpu_domain));
DEFINE(TI_CPU_SAVE, offsetof(struct thread_info, cpu_context));
- DEFINE(TI_USED_CP, offsetof(struct thread_info, used_cp));
+ DEFINE(TI_ABI_SYSCALL, offsetof(struct thread_info, abi_syscall));
DEFINE(TI_TP_VALUE, offsetof(struct thread_info, tp_value));
DEFINE(TI_FPSTATE, offsetof(struct thread_info, fpstate));
#ifdef CONFIG_VFP
@@ -79,15 +59,13 @@ int main(void)
DEFINE(VFP_CPU, offsetof(union vfp_state, hard.cpu));
#endif
#endif
+ DEFINE(SOFTIRQ_DISABLE_OFFSET,SOFTIRQ_DISABLE_OFFSET);
#ifdef CONFIG_ARM_THUMBEE
DEFINE(TI_THUMBEE_STATE, offsetof(struct thread_info, thumbee_state));
#endif
#ifdef CONFIG_IWMMXT
DEFINE(TI_IWMMXT_STATE, offsetof(struct thread_info, fpstate.iwmmxt));
#endif
-#ifdef CONFIG_CRUNCH
- DEFINE(TI_CRUNCH_STATE, offsetof(struct thread_info, crunchstate));
-#endif
BLANK();
DEFINE(S_R0, offsetof(struct pt_regs, ARM_r0));
DEFINE(S_R1, offsetof(struct pt_regs, ARM_r1));
@@ -109,9 +87,12 @@ int main(void)
DEFINE(S_OLD_R0, offsetof(struct pt_regs, ARM_ORIG_r0));
DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs));
DEFINE(SVC_DACR, offsetof(struct svc_pt_regs, dacr));
- DEFINE(SVC_ADDR_LIMIT, offsetof(struct svc_pt_regs, addr_limit));
+ DEFINE(SVC_TTBCR, offsetof(struct svc_pt_regs, ttbcr));
DEFINE(SVC_REGS_SIZE, sizeof(struct svc_pt_regs));
BLANK();
+ DEFINE(SIGFRAME_RC3_OFFSET, offsetof(struct sigframe, retcode[3]));
+ DEFINE(RT_SIGFRAME_RC3_OFFSET, offsetof(struct rt_sigframe, sig.retcode[3]));
+ BLANK();
#ifdef CONFIG_CACHE_L2X0
DEFINE(L2X0_R_PHY_BASE, offsetof(struct l2x0_regs, phy_base));
DEFINE(L2X0_R_AUX_CTRL, offsetof(struct l2x0_regs, aux_ctrl));
@@ -164,6 +145,8 @@ int main(void)
DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys));
DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash));
#endif
+ DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id));
+ DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state));
BLANK();
DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
@@ -172,16 +155,20 @@ int main(void)
DEFINE(CACHE_WRITEBACK_ORDER, __CACHE_WRITEBACK_ORDER);
DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE);
BLANK();
-#ifdef CONFIG_KVM_ARM_HOST
- DEFINE(VCPU_GUEST_CTXT, offsetof(struct kvm_vcpu, arch.ctxt));
- DEFINE(VCPU_HOST_CTXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
- DEFINE(CPU_CTXT_VFP, offsetof(struct kvm_cpu_context, vfp));
- DEFINE(CPU_CTXT_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
- DEFINE(GP_REGS_USR, offsetof(struct kvm_regs, usr_regs));
-#endif
- BLANK();
-#ifdef CONFIG_VDSO
- DEFINE(VDSO_DATA_SIZE, sizeof(union vdso_data_store));
-#endif
+#ifdef CONFIG_ARM_MPU
+ DEFINE(MPU_RNG_INFO_RNGS, offsetof(struct mpu_rgn_info, rgns));
+ DEFINE(MPU_RNG_INFO_USED, offsetof(struct mpu_rgn_info, used));
+
+ DEFINE(MPU_RNG_SIZE, sizeof(struct mpu_rgn));
+ DEFINE(MPU_RGN_DRBAR, offsetof(struct mpu_rgn, drbar));
+ DEFINE(MPU_RGN_DRSR, offsetof(struct mpu_rgn, drsr));
+ DEFINE(MPU_RGN_DRACR, offsetof(struct mpu_rgn, dracr));
+ DEFINE(MPU_RGN_PRBAR, offsetof(struct mpu_rgn, prbar));
+ DEFINE(MPU_RGN_PRLAR, offsetof(struct mpu_rgn, prlar));
+#endif
+ DEFINE(KEXEC_START_ADDR, offsetof(struct kexec_relocate_data, kexec_start_address));
+ DEFINE(KEXEC_INDIR_PAGE, offsetof(struct kexec_relocate_data, kexec_indirection_page));
+ DEFINE(KEXEC_MACH_TYPE, offsetof(struct kexec_relocate_data, kexec_mach_type));
+ DEFINE(KEXEC_R2, offsetof(struct kexec_relocate_data, kexec_r2));
return 0;
}
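
[Editor's note] The asm-offsets.c changes above feed the DEFINE()/offsetof() machinery that exports C struct layouts to the assembly files later in this series. A minimal stand-alone sketch of that mechanism (the struct and symbol names here are illustrative, not from the patch):

#include <stddef.h>

/* Illustrative struct standing in for the kernel's pt_regs/thread_info. */
struct regs_like {
	unsigned long r0;
	unsigned long r1;
	unsigned long pc;
};

/*
 * Same trick kbuild uses: the "i" constraint forces the compiler to print
 * the constant into the generated assembly, where a sed pass turns each
 * marker line into a #define. (The real kernel macro wraps the marker in
 * .ascii; this is a simplified sketch.)
 */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

void generate_offsets(void)
{
	DEFINE(S_PC, offsetof(struct regs_like, pc));
}

Compiling with gcc -S and grepping the output for "->" shows the marker lines; the raw markers are not valid assembler input, which is why the kernel wraps them in .ascii and post-processes the .s file rather than assembling it.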
diff --git a/arch/arm/kernel/atags.h b/arch/arm/kernel/atags.h
index edfa2268c127..f2819c25b602 100644
--- a/arch/arm/kernel/atags.h
+++ b/arch/arm/kernel/atags.h
@@ -1,11 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
void convert_to_tag_list(struct tag *tags);
#ifdef CONFIG_ATAGS
-const struct machine_desc *setup_machine_tags(phys_addr_t __atags_pointer,
+const struct machine_desc *setup_machine_tags(void *__atags_vaddr,
unsigned int machine_nr);
#else
-static inline const struct machine_desc *
-setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
+static inline const struct machine_desc * __init __noreturn
+setup_machine_tags(void *__atags_vaddr, unsigned int machine_nr)
{
early_print("no ATAGS support: can't continue\n");
while (true);
diff --git a/arch/arm/kernel/atags_compat.c b/arch/arm/kernel/atags_compat.c
index 05c28b12353c..10da11c212cc 100644
--- a/arch/arm/kernel/atags_compat.c
+++ b/arch/arm/kernel/atags_compat.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/kernel/atags_compat.c
*
* Copyright (C) 2001 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* We keep the old params compatibility cruft in one place (here)
* so we don't end up with lots of mess around other places.
*
diff --git a/arch/arm/kernel/atags_parse.c b/arch/arm/kernel/atags_parse.c
index 98fbfd235ac8..4ec591bde3df 100644
--- a/arch/arm/kernel/atags_parse.c
+++ b/arch/arm/kernel/atags_parse.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Tag parsing.
*
* Copyright (C) 1995-2001 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
/*
@@ -24,6 +21,7 @@
#include <linux/root_dev.h>
#include <linux/screen_info.h>
#include <linux/memblock.h>
+#include <uapi/linux/mount.h>
#include <asm/setup.h>
#include <asm/system_info.h>
@@ -71,18 +69,18 @@ static int __init parse_tag_mem32(const struct tag *tag)
__tagtable(ATAG_MEM, parse_tag_mem32);
-#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
+#if defined(CONFIG_ARCH_FOOTBRIDGE) && defined(CONFIG_VGA_CONSOLE)
static int __init parse_tag_videotext(const struct tag *tag)
{
- screen_info.orig_x = tag->u.videotext.x;
- screen_info.orig_y = tag->u.videotext.y;
- screen_info.orig_video_page = tag->u.videotext.video_page;
- screen_info.orig_video_mode = tag->u.videotext.video_mode;
- screen_info.orig_video_cols = tag->u.videotext.video_cols;
- screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
- screen_info.orig_video_lines = tag->u.videotext.video_lines;
- screen_info.orig_video_isVGA = tag->u.videotext.video_isvga;
- screen_info.orig_video_points = tag->u.videotext.video_points;
+ vgacon_screen_info.orig_x = tag->u.videotext.x;
+ vgacon_screen_info.orig_y = tag->u.videotext.y;
+ vgacon_screen_info.orig_video_page = tag->u.videotext.video_page;
+ vgacon_screen_info.orig_video_mode = tag->u.videotext.video_mode;
+ vgacon_screen_info.orig_video_cols = tag->u.videotext.video_cols;
+ vgacon_screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
+ vgacon_screen_info.orig_video_lines = tag->u.videotext.video_lines;
+ vgacon_screen_info.orig_video_isVGA = tag->u.videotext.video_isvga;
+ vgacon_screen_info.orig_video_points = tag->u.videotext.video_points;
return 0;
}
@@ -93,8 +91,6 @@ __tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
static int __init parse_tag_ramdisk(const struct tag *tag)
{
rd_image_start = tag->u.ramdisk.start;
- rd_doload = (tag->u.ramdisk.flags & 1) == 0;
- rd_prompt = (tag->u.ramdisk.flags & 2) == 0;
if (tag->u.ramdisk.size)
rd_size = tag->u.ramdisk.size;
@@ -131,7 +127,7 @@ static int __init parse_tag_cmdline(const struct tag *tag)
#elif defined(CONFIG_CMDLINE_FORCE)
pr_warn("Ignoring tag cmdline (using the default kernel command line)\n");
#else
- strlcpy(default_command_line, tag->u.cmdline.cmdline,
+ strscpy(default_command_line, tag->u.cmdline.cmdline,
COMMAND_LINE_SIZE);
#endif
return 0;
@@ -178,7 +174,7 @@ static void __init squash_mem_tags(struct tag *tag)
}
const struct machine_desc * __init
-setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
+setup_machine_tags(void *atags_vaddr, unsigned int machine_nr)
{
struct tag *tags = (struct tag *)&default_tags;
const struct machine_desc *mdesc = NULL, *p;
@@ -196,14 +192,11 @@ setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
break;
}
- if (!mdesc) {
- early_print("\nError: unrecognized/unsupported machine ID"
- " (r1 = 0x%08x).\n\n", machine_nr);
- dump_machine_table(); /* does not return */
- }
+ if (!mdesc)
+ return NULL;
- if (__atags_pointer)
- tags = phys_to_virt(__atags_pointer);
+ if (atags_vaddr)
+ tags = atags_vaddr;
else if (mdesc->atag_offset)
tags = (void *)(PAGE_OFFSET + mdesc->atag_offset);
@@ -231,7 +224,7 @@ setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
}
/* parse_early_param needs a boot_command_line */
- strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);
+ strscpy(boot_command_line, from, COMMAND_LINE_SIZE);
return mdesc;
}
diff --git a/arch/arm/kernel/atags_proc.c b/arch/arm/kernel/atags_proc.c
index 5a3379055f55..cd09f8ab93e3 100644
--- a/arch/arm/kernel/atags_proc.c
+++ b/arch/arm/kernel/atags_proc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <asm/setup.h>
@@ -6,19 +7,19 @@
struct buffer {
size_t size;
- char data[];
+ char data[] __counted_by(size);
};
static ssize_t atags_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct buffer *b = PDE_DATA(file_inode(file));
+ struct buffer *b = pde_data(file_inode(file));
return simple_read_from_buffer(buf, count, ppos, b->data, b->size);
}
-static const struct file_operations atags_fops = {
- .read = atags_read,
- .llseek = default_llseek,
+static const struct proc_ops atags_proc_ops = {
+ .proc_read = atags_read,
+ .proc_lseek = default_llseek,
};
#define BOOT_PARAMS_SIZE 1536
@@ -41,7 +42,7 @@ static int __init init_atags_procfs(void)
size_t size;
if (tag->hdr.tag != ATAG_CORE) {
- pr_info("No ATAGs?");
+ pr_info("No ATAGs?\n");
return -EINVAL;
}
@@ -53,14 +54,14 @@ static int __init init_atags_procfs(void)
WARN_ON(tag->hdr.tag != ATAG_NONE);
- b = kmalloc(sizeof(*b) + size, GFP_KERNEL);
+ b = kmalloc(struct_size(b, data, size), GFP_KERNEL);
if (!b)
goto nomem;
b->size = size;
memcpy(b->data, atags_copy, size);
- tags_entry = proc_create_data("atags", 0400, NULL, &atags_fops, b);
+ tags_entry = proc_create_data("atags", 0400, NULL, &atags_proc_ops, b);
if (!tags_entry)
goto nomem;
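
[Editor's note] The atags_proc.c hunk converts the flexible-array allocation to struct_size() with a __counted_by() annotation. A user-space sketch of the same pattern, assuming plain malloc() in place of kmalloc() (the blob names are illustrative):

#include <stdlib.h>
#include <string.h>

/* Flexible-array struct mirroring struct buffer in atags_proc.c. */
struct blob {
	size_t size;
	char data[];		/* __counted_by(size) on recent GCC/Clang */
};

static struct blob *blob_create(const void *src, size_t n)
{
	/* struct_size(b, data, n) computes this sum with overflow checking. */
	struct blob *b = malloc(sizeof(*b) + n);

	if (!b)
		return NULL;
	b->size = n;		/* set the counter before touching data[] */
	memcpy(b->data, src, n);
	return b;
}

Setting the counter field before writing data[] matters: with __counted_by, fortified builds bounds-check array accesses against it.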
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index c1809fb549dd..b5793e8fbdc1 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/arm/kernel/bios32.c
*
@@ -9,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -141,15 +143,15 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_89C940F,
*/
static void pci_fixup_dec21285(struct pci_dev *dev)
{
- int i;
-
if (dev->devfn == 0) {
+ struct resource *r;
+
dev->class &= 0xff;
dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
- dev->resource[i].start = 0;
- dev->resource[i].end = 0;
- dev->resource[i].flags = 0;
+ pci_dev_for_each_resource(dev, r) {
+ r->start = 0;
+ r->end = 0;
+ r->flags = 0;
}
}
}
@@ -161,13 +163,11 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, pci_fixup_d
static void pci_fixup_ide_bases(struct pci_dev *dev)
{
struct resource *r;
- int i;
if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
return;
- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
- r = dev->resource + i;
+ pci_dev_for_each_resource(dev, r) {
if ((r->start & ~0x80) == 0x374) {
r->start |= 2;
r->end = r->start;
@@ -251,23 +251,6 @@ static void pci_fixup_cy82c693(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, pci_fixup_cy82c693);
-static void pci_fixup_it8152(struct pci_dev *dev)
-{
- int i;
- /* fixup for ITE 8152 devices */
- /* FIXME: add defines for class 0x68000 and 0x80103 */
- if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST ||
- dev->class == 0x68000 ||
- dev->class == 0x80103) {
- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
- dev->resource[i].start = 0;
- dev->resource[i].end = 0;
- dev->resource[i].flags = 0;
- }
- }
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8152, pci_fixup_it8152);
-
/*
* If the bus contains any of these devices, then we must not turn on
* parity checking of any kind. Currently this is CyberPro 20x0 only.
@@ -355,8 +338,8 @@ void pcibios_fixup_bus(struct pci_bus *bus)
/*
* Report what we did for this bus
*/
- pr_info("PCI: bus%d: Fast back to back transfers %sabled\n",
- bus->number, (features & PCI_COMMAND_FAST_BACK) ? "en" : "dis");
+ pr_info("PCI: bus%d: Fast back to back transfers %s\n",
+ bus->number, str_enabled_disabled(features & PCI_COMMAND_FAST_BACK));
}
EXPORT_SYMBOL(pcibios_fixup_bus);
@@ -410,8 +393,7 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return irq;
}
-static int pcibios_init_resource(int busnr, struct pci_sys_data *sys,
- int io_optional)
+static int pcibios_init_resource(int busnr, struct pci_sys_data *sys)
{
int ret;
struct resource_entry *window;
@@ -421,14 +403,6 @@ static int pcibios_init_resource(int busnr, struct pci_sys_data *sys,
&iomem_resource, sys->mem_offset);
}
- /*
- * If a platform says I/O port support is optional, we don't add
- * the default I/O space. The platform is responsible for adding
- * any I/O space it needs.
- */
- if (io_optional)
- return 0;
-
resource_list_for_each_entry(window, &sys->resources)
if (resource_type(window->res) == IORESOURCE_IO)
return 0;
@@ -478,7 +452,7 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
if (ret > 0) {
- ret = pcibios_init_resource(nr, sys, hw->io_optional);
+ ret = pcibios_init_resource(nr, sys);
if (ret) {
pci_free_host_bridge(bridge);
break;
@@ -496,9 +470,6 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
bridge->sysdata = sys;
bridge->busnr = sys->busnr;
bridge->ops = hw->ops;
- bridge->msi = hw->msi_ctrl;
- bridge->align_resource =
- hw->align_resource;
ret = pci_scan_root_bus_bridge(bridge);
}
@@ -526,7 +497,7 @@ void pci_common_init_dev(struct device *parent, struct hw_pci *hw)
struct pci_sys_data *sys;
LIST_HEAD(head);
- pci_add_flags(PCI_REASSIGN_ALL_RSRC);
+ pci_add_flags(PCI_REASSIGN_ALL_BUS);
if (hw->preinit)
hw->preinit();
pcibios_init_hw(parent, hw, &head);
diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c
new file mode 100644
index 000000000000..087bce6ec8e9
--- /dev/null
+++ b/arch/arm/kernel/bugs.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <asm/bugs.h>
+#include <asm/proc-fns.h>
+
+void check_other_bugs(void)
+{
+#ifdef MULTI_CPU
+ if (cpu_check_bugs)
+ cpu_check_bugs();
+#endif
+}
+
+void __init arch_cpu_finalize_init(void)
+{
+ check_writebuffer_bugs();
+ check_other_bugs();
+}
diff --git a/arch/arm/kernel/cacheinfo.c b/arch/arm/kernel/cacheinfo.c
new file mode 100644
index 000000000000..e1469b641780
--- /dev/null
+++ b/arch/arm/kernel/cacheinfo.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARM cacheinfo support
+ *
+ * Copyright (C) 2023 Linaro Ltd.
+ * Copyright (C) 2015 ARM Ltd.
+ * All Rights Reserved
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cacheinfo.h>
+#include <linux/of.h>
+
+#include <asm/cachetype.h>
+#include <asm/cputype.h>
+#include <asm/system_info.h>
+
+/* Ctypen, bits[3(n - 1) + 2 : 3(n - 1)], for n = 1 to 7 */
+#define CLIDR_CTYPE_SHIFT(level) (3 * (level - 1))
+#define CLIDR_CTYPE_MASK(level) (7 << CLIDR_CTYPE_SHIFT(level))
+#define CLIDR_CTYPE(clidr, level) \
+ (((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level))
+
+#define MAX_CACHE_LEVEL 7 /* Max 7 level supported */
+
+#define CTR_FORMAT_MASK GENMASK(31, 29)
+#define CTR_FORMAT_ARMV6 0
+#define CTR_FORMAT_ARMV7 4
+#define CTR_CWG_MASK GENMASK(27, 24)
+#define CTR_DSIZE_LEN_MASK GENMASK(13, 12)
+#define CTR_ISIZE_LEN_MASK GENMASK(1, 0)
+
+/* Also valid for v7m */
+static inline int cache_line_size_cp15(void)
+{
+ u32 ctr = read_cpuid_cachetype();
+ u32 format = FIELD_GET(CTR_FORMAT_MASK, ctr);
+
+ if (format == CTR_FORMAT_ARMV7) {
+ u32 cwg = FIELD_GET(CTR_CWG_MASK, ctr);
+
+ return cwg ? 4 << cwg : ARCH_DMA_MINALIGN;
+ } else if (WARN_ON_ONCE(format != CTR_FORMAT_ARMV6)) {
+ return ARCH_DMA_MINALIGN;
+ }
+
+ return 8 << max(FIELD_GET(CTR_ISIZE_LEN_MASK, ctr),
+ FIELD_GET(CTR_DSIZE_LEN_MASK, ctr));
+}
+
+int cache_line_size(void)
+{
+ if (coherency_max_size != 0)
+ return coherency_max_size;
+
+ /* CP15 is optional / implementation defined before ARMv6 */
+ if (cpu_architecture() < CPU_ARCH_ARMv6)
+ return ARCH_DMA_MINALIGN;
+
+ return cache_line_size_cp15();
+}
+EXPORT_SYMBOL_GPL(cache_line_size);
+
+static inline enum cache_type get_cache_type(int level)
+{
+ u32 clidr;
+
+ if (level > MAX_CACHE_LEVEL)
+ return CACHE_TYPE_NOCACHE;
+
+ clidr = read_clidr();
+
+ return CLIDR_CTYPE(clidr, level);
+}
+
+static void ci_leaf_init(struct cacheinfo *this_leaf,
+ enum cache_type type, unsigned int level)
+{
+ this_leaf->level = level;
+ this_leaf->type = type;
+}
+
+static int detect_cache_level(unsigned int *level_p, unsigned int *leaves_p)
+{
+ unsigned int ctype, level, leaves;
+ u32 ctr, format;
+
+ /* CLIDR is not present before ARMv7/v7m */
+ if (cpu_architecture() < CPU_ARCH_ARMv7)
+ return -EOPNOTSUPP;
+
+ /* Don't try reading CLIDR if CTR declares old format */
+ ctr = read_cpuid_cachetype();
+ format = FIELD_GET(CTR_FORMAT_MASK, ctr);
+ if (format != CTR_FORMAT_ARMV7)
+ return -EOPNOTSUPP;
+
+ for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) {
+ ctype = get_cache_type(level);
+ if (ctype == CACHE_TYPE_NOCACHE) {
+ level--;
+ break;
+ }
+ /* Separate instruction and data caches */
+ leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
+ }
+
+ *level_p = level;
+ *leaves_p = leaves;
+
+ return 0;
+}
+
+int early_cache_level(unsigned int cpu)
+{
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+
+ return detect_cache_level(&this_cpu_ci->num_levels, &this_cpu_ci->num_leaves);
+}
+
+int init_cache_level(unsigned int cpu)
+{
+ unsigned int level, leaves;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ int fw_level;
+ int ret;
+
+ ret = detect_cache_level(&level, &leaves);
+ if (ret)
+ return ret;
+
+ fw_level = of_find_last_cache_level(cpu);
+
+ if (level < fw_level) {
+ /*
+ * some external caches not specified in CLIDR_EL1
+ * the information may be available in the device tree
+ * only unified external caches are considered here
+ */
+ leaves += (fw_level - level);
+ level = fw_level;
+ }
+
+ this_cpu_ci->num_levels = level;
+ this_cpu_ci->num_leaves = leaves;
+ return 0;
+}
+
+int populate_cache_leaves(unsigned int cpu)
+{
+ unsigned int level, idx;
+ enum cache_type type;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+ unsigned int arch = cpu_architecture();
+
+ /* CLIDR is not present before ARMv7/v7m */
+ if (arch < CPU_ARCH_ARMv7)
+ return -EOPNOTSUPP;
+
+ for (idx = 0, level = 1; level <= this_cpu_ci->num_levels &&
+ idx < this_cpu_ci->num_leaves; idx++, level++) {
+ type = get_cache_type(level);
+ if (type == CACHE_TYPE_SEPARATE) {
+ ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
+ ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
+ } else {
+ ci_leaf_init(this_leaf++, type, level);
+ }
+ }
+
+ return 0;
+}
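
[Editor's note] cache_line_size_cp15() above reads the Cache Type Register and, for the ARMv7 format, turns the Cache Writeback Granule field into bytes. A user-space sketch of that decode with the FIELD_GET() calls open-coded (the CTR constant below is illustrative only):

#include <stdint.h>
#include <stdio.h>

#define CTR_FORMAT(ctr)	(((ctr) >> 29) & 0x7)	/* bits [31:29] */
#define CTR_CWG(ctr)	(((ctr) >> 24) & 0xf)	/* bits [27:24] */

int main(void)
{
	/* Made-up CTR value, roughly Cortex-A9-shaped. */
	uint32_t ctr = 0x83338003;

	if (CTR_FORMAT(ctr) == 4) {	/* ARMv7 register layout */
		unsigned int cwg = CTR_CWG(ctr);

		/* CWG is log2 of the granule in 4-byte words: 4 << CWG bytes. */
		printf("writeback granule: %u bytes\n", cwg ? 4u << cwg : 0u);
	}
	return 0;
}

With the sample value this prints 32 bytes; a zero CWG means "not reported", which is why the kernel falls back to ARCH_DMA_MINALIGN.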
diff --git a/arch/arm/kernel/cpuidle.c b/arch/arm/kernel/cpuidle.c
index a3308ad1a024..fba1f8bb03b5 100644
--- a/arch/arm/kernel/cpuidle.c
+++ b/arch/arm/kernel/cpuidle.c
@@ -1,23 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2012 Linaro Ltd.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/cpuidle.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <asm/cpuidle.h>
extern struct of_cpuidle_method __cpuidle_method_of_table[];
static const struct of_cpuidle_method __cpuidle_method_of_table_sentinel
- __used __section(__cpuidle_method_of_table_end);
+ __used __section("__cpuidle_method_of_table_end");
static struct cpuidle_ops cpuidle_ops[NR_CPUS] __ro_after_init;
@@ -32,8 +25,8 @@ static struct cpuidle_ops cpuidle_ops[NR_CPUS] __ro_after_init;
*
* Returns the index passed as parameter
*/
-int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
+__cpuidle int arm_cpuidle_simple_enter(struct cpuidle_device *dev, struct
+ cpuidle_driver *drv, int index)
{
cpu_do_idle();
@@ -101,8 +94,8 @@ static int __init arm_cpuidle_read_ops(struct device_node *dn, int cpu)
ops = arm_cpuidle_get_ops(enable_method);
if (!ops) {
- pr_warn("%s: unsupported enable-method property: %s\n",
- dn->full_name, enable_method);
+ pr_warn("%pOF: unsupported enable-method property: %s\n",
+ dn, enable_method);
return -EOPNOTSUPP;
}
diff --git a/arch/arm/kernel/crash_dump.c b/arch/arm/kernel/crash_dump.c
index 5d1286d51154..938bd932df9a 100644
--- a/arch/arm/kernel/crash_dump.c
+++ b/arch/arm/kernel/crash_dump.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/arm/kernel/crash_dump.c
*
@@ -7,32 +8,16 @@
* This code is taken from arch/x86/kernel/crash_dump_64.c
* Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
* Copyright (C) IBM Corporation, 2004. All rights reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/errno.h>
#include <linux/crash_dump.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/uio.h>
-/**
- * copy_oldmem_page() - copy one page from old kernel memory
- * @pfn: page frame number to be copied
- * @buf: buffer where the copied page is placed
- * @csize: number of bytes to copy
- * @offset: offset in bytes into the page
- * @userbuf: if set, @buf is int he user address space
- *
- * This function copies one page from old kernel memory into buffer pointed by
- * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes
- * copied or negative error in case of failure.
- */
-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
- size_t csize, unsigned long offset,
- int userbuf)
+ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
+ size_t csize, unsigned long offset)
{
void *vaddr;
@@ -43,14 +28,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
if (!vaddr)
return -ENOMEM;
- if (userbuf) {
- if (copy_to_user(buf, vaddr + offset, csize)) {
- iounmap(vaddr);
- return -EFAULT;
- }
- } else {
- memcpy(buf, vaddr + offset, csize);
- }
+ csize = copy_to_iter(vaddr + offset, csize, iter);
iounmap(vaddr);
return csize;
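
[Editor's note] The crash_dump.c conversion replaces the copy_to_user()/memcpy() branching with a single copy_to_iter() call, so the arch code no longer cares whether the destination is a user or a kernel buffer. A user-space analogue of that shape (struct sink is an illustrative stand-in for struct iov_iter, not a kernel type):

#include <stddef.h>
#include <string.h>
#include <stdio.h>

/* Minimal stand-in for struct iov_iter: one destination plus a cursor. */
struct sink {
	char   *buf;
	size_t  pos, cap;
};

/* Analogue of copy_to_iter(): returns the number of bytes actually copied. */
static size_t sink_copy(const void *src, size_t n, struct sink *s)
{
	if (n > s->cap - s->pos)
		n = s->cap - s->pos;
	memcpy(s->buf + s->pos, src, n);
	s->pos += n;
	return n;
}

int main(void)
{
	char out[16];
	struct sink s = { out, 0, sizeof(out) };
	size_t n = sink_copy("oldmem page", 11, &s);

	printf("copied %zu bytes: %.*s\n", n, (int)n, out);
	return 0;
}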
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index ea9646cc2a0e..d92f44bdf438 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/kernel/debug.S
*
* Copyright (C) 1994-1999 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* 32-bit debugging code
*/
#include <linux/linkage.h>
@@ -55,7 +52,9 @@ ENDPROC(printhex4)
ENTRY(printhex2)
mov r1, #2
-printhex: adr r2, hexbuf
+printhex: adr r2, hexbuf_rel
+ ldr r3, [r2]
+ add r2, r2, r3
add r3, r2, r1
mov r1, #0
strb r1, [r3]
@@ -71,7 +70,11 @@ printhex: adr r2, hexbuf
b printascii
ENDPROC(printhex2)
-hexbuf: .space 16
+ .pushsection .bss
+hexbuf_addr: .space 16
+ .popsection
+ .align
+hexbuf_rel: .long hexbuf_addr - .
.ltorg
@@ -79,25 +82,35 @@ hexbuf: .space 16
ENTRY(printascii)
addruart_current r3, r1, r2
- b 2f
-1: waituart r2, r3
+1: teq r0, #0
+ ldrbne r1, [r0], #1
+ teqne r1, #0
+ reteq lr
+2: teq r1, #'\n'
+ bne 3f
+ mov r1, #'\r'
+#ifdef CONFIG_DEBUG_UART_FLOW_CONTROL
+ waituartcts r2, r3
+#endif
+ waituarttxrdy r2, r3
senduart r1, r3
busyuart r2, r3
- teq r1, #'\n'
- moveq r1, #'\r'
- beq 1b
-2: teq r0, #0
- ldrneb r1, [r0], #1
- teqne r1, #0
- bne 1b
- ret lr
+ mov r1, #'\n'
+3:
+#ifdef CONFIG_DEBUG_UART_FLOW_CONTROL
+ waituartcts r2, r3
+#endif
+ waituarttxrdy r2, r3
+ senduart r1, r3
+ busyuart r2, r3
+ b 1b
ENDPROC(printascii)
ENTRY(printch)
addruart_current r3, r1, r2
mov r1, r0
mov r0, #0
- b 1b
+ b 2b
ENDPROC(printch)
#ifdef CONFIG_MMU
@@ -115,16 +128,26 @@ ENTRY(printascii)
mov r1, r0
mov r0, #0x04 @ SYS_WRITE0
ARM( svc #0x123456 )
+#ifdef CONFIG_CPU_V7M
+ THUMB( bkpt #0xab )
+#else
THUMB( svc #0xab )
+#endif
ret lr
ENDPROC(printascii)
ENTRY(printch)
- adr r1, hexbuf
+ adr r1, hexbuf_rel
+ ldr r2, [r1]
+ add r1, r1, r2
strb r0, [r1]
mov r0, #0x03 @ SYS_WRITEC
ARM( svc #0x123456 )
+#ifdef CONFIG_CPU_V7M
+ THUMB( bkpt #0xab )
+#else
THUMB( svc #0xab )
+#endif
ret lr
ENDPROC(printch)
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index f676febbb270..3b78966e750a 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -1,23 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/kernel/devtree.c
*
* Copyright (C) 2009 Canonical Ltd. <jeremy.kerr@canonical.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
#include <linux/smp.h>
#include <asm/cputype.h>
@@ -33,7 +28,7 @@
extern struct of_cpu_method __cpu_method_of_table[];
static const struct of_cpu_method __cpu_method_of_table_sentinel
- __used __section(__cpu_method_of_table_end);
+ __used __section("__cpu_method_of_table_end");
static int __init set_smp_ops_by_method(struct device_node *node)
@@ -87,38 +82,16 @@ void __init arm_dt_init_cpu_maps(void)
if (!cpus)
return;
- for_each_child_of_node(cpus, cpu) {
- const __be32 *cell;
- int prop_bytes;
- u32 hwid;
+ for_each_of_cpu_node(cpu) {
+ u32 hwid = of_get_cpu_hwid(cpu, 0);
- if (of_node_cmp(cpu->type, "cpu"))
- continue;
-
- pr_debug(" * %s...\n", cpu->full_name);
- /*
- * A device tree containing CPU nodes with missing "reg"
- * properties is considered invalid to build the
- * cpu_logical_map.
- */
- cell = of_get_property(cpu, "reg", &prop_bytes);
- if (!cell || prop_bytes < sizeof(*cell)) {
- pr_debug(" * %s missing reg property\n",
- cpu->full_name);
- of_node_put(cpu);
- return;
- }
+ pr_debug(" * %pOF...\n", cpu);
/*
* Bits n:24 must be set to 0 in the DT since the reg property
* defines the MPIDR[23:0].
*/
- do {
- hwid = be32_to_cpu(*cell++);
- prop_bytes -= sizeof(*cell);
- } while (!hwid && prop_bytes > 0);
-
- if (prop_bytes || (hwid & ~MPIDR_HWID_BITMASK)) {
+ if (hwid & ~MPIDR_HWID_BITMASK) {
of_node_put(cpu);
return;
}
@@ -211,25 +184,23 @@ static const void * __init arch_get_next_mach(const char *const **match)
/**
* setup_machine_fdt - Machine setup when an dtb was passed to the kernel
- * @dt_phys: physical address of dt blob
+ * @dt_virt: virtual address of dt blob
*
* If a dtb was passed to the kernel in r2, then use it to choose the
* correct machine_desc and to setup the system.
*/
-const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
+const struct machine_desc * __init setup_machine_fdt(void *dt_virt)
{
const struct machine_desc *mdesc, *mdesc_best = NULL;
-#if defined(CONFIG_ARCH_MULTIPLATFORM) || defined(CONFIG_ARM_SINGLE_ARMV7M)
DT_MACHINE_START(GENERIC_DT, "Generic DT based system")
.l2c_aux_val = 0x0,
.l2c_aux_mask = ~0x0,
MACHINE_END
mdesc_best = &__mach_desc_GENERIC_DT;
-#endif
- if (!dt_phys || !early_init_dt_verify(phys_to_virt(dt_phys)))
+ if (!dt_virt || !early_init_dt_verify(dt_virt, __pa(dt_virt)))
return NULL;
mdesc = of_flat_dt_match_machine(mdesc_best, arch_get_next_mach);
diff --git a/arch/arm/kernel/dma-isa.c b/arch/arm/kernel/dma-isa.c
deleted file mode 100644
index 84363fe7bad2..000000000000
--- a/arch/arm/kernel/dma-isa.c
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * linux/arch/arm/kernel/dma-isa.c
- *
- * Copyright (C) 1999-2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * ISA DMA primitives
- * Taken from various sources, including:
- * linux/include/asm/dma.h: Defines for using and allocating dma channels.
- * Written by Hennus Bergman, 1992.
- * High DMA channel support & info by Hannu Savolainen and John Boyd,
- * Nov. 1992.
- * arch/arm/kernel/dma-ebsa285.c
- * Copyright (C) 1998 Phil Blundell
- */
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/io.h>
-
-#include <asm/dma.h>
-#include <asm/mach/dma.h>
-
-#define ISA_DMA_MASK 0
-#define ISA_DMA_MODE 1
-#define ISA_DMA_CLRFF 2
-#define ISA_DMA_PGHI 3
-#define ISA_DMA_PGLO 4
-#define ISA_DMA_ADDR 5
-#define ISA_DMA_COUNT 6
-
-static unsigned int isa_dma_port[8][7] = {
- /* MASK MODE CLRFF PAGE_HI PAGE_LO ADDR COUNT */
- { 0x0a, 0x0b, 0x0c, 0x487, 0x087, 0x00, 0x01 },
- { 0x0a, 0x0b, 0x0c, 0x483, 0x083, 0x02, 0x03 },
- { 0x0a, 0x0b, 0x0c, 0x481, 0x081, 0x04, 0x05 },
- { 0x0a, 0x0b, 0x0c, 0x482, 0x082, 0x06, 0x07 },
- { 0xd4, 0xd6, 0xd8, 0x000, 0x000, 0xc0, 0xc2 },
- { 0xd4, 0xd6, 0xd8, 0x48b, 0x08b, 0xc4, 0xc6 },
- { 0xd4, 0xd6, 0xd8, 0x489, 0x089, 0xc8, 0xca },
- { 0xd4, 0xd6, 0xd8, 0x48a, 0x08a, 0xcc, 0xce }
-};
-
-static int isa_get_dma_residue(unsigned int chan, dma_t *dma)
-{
- unsigned int io_port = isa_dma_port[chan][ISA_DMA_COUNT];
- int count;
-
- count = 1 + inb(io_port);
- count |= inb(io_port) << 8;
-
- return chan < 4 ? count : (count << 1);
-}
-
-static void isa_enable_dma(unsigned int chan, dma_t *dma)
-{
- if (dma->invalid) {
- unsigned long address, length;
- unsigned int mode;
- enum dma_data_direction direction;
-
- mode = (chan & 3) | dma->dma_mode;
- switch (dma->dma_mode & DMA_MODE_MASK) {
- case DMA_MODE_READ:
- direction = DMA_FROM_DEVICE;
- break;
-
- case DMA_MODE_WRITE:
- direction = DMA_TO_DEVICE;
- break;
-
- case DMA_MODE_CASCADE:
- direction = DMA_BIDIRECTIONAL;
- break;
-
- default:
- direction = DMA_NONE;
- break;
- }
-
- if (!dma->sg) {
- /*
- * Cope with ISA-style drivers which expect cache
- * coherence.
- */
- dma->sg = &dma->buf;
- dma->sgcount = 1;
- dma->buf.length = dma->count;
- dma->buf.dma_address = dma_map_single(NULL,
- dma->addr, dma->count,
- direction);
- }
-
- address = dma->buf.dma_address;
- length = dma->buf.length - 1;
-
- outb(address >> 16, isa_dma_port[chan][ISA_DMA_PGLO]);
- outb(address >> 24, isa_dma_port[chan][ISA_DMA_PGHI]);
-
- if (chan >= 4) {
- address >>= 1;
- length >>= 1;
- }
-
- outb(0, isa_dma_port[chan][ISA_DMA_CLRFF]);
-
- outb(address, isa_dma_port[chan][ISA_DMA_ADDR]);
- outb(address >> 8, isa_dma_port[chan][ISA_DMA_ADDR]);
-
- outb(length, isa_dma_port[chan][ISA_DMA_COUNT]);
- outb(length >> 8, isa_dma_port[chan][ISA_DMA_COUNT]);
-
- outb(mode, isa_dma_port[chan][ISA_DMA_MODE]);
- dma->invalid = 0;
- }
- outb(chan & 3, isa_dma_port[chan][ISA_DMA_MASK]);
-}
-
-static void isa_disable_dma(unsigned int chan, dma_t *dma)
-{
- outb(chan | 4, isa_dma_port[chan][ISA_DMA_MASK]);
-}
-
-static struct dma_ops isa_dma_ops = {
- .type = "ISA",
- .enable = isa_enable_dma,
- .disable = isa_disable_dma,
- .residue = isa_get_dma_residue,
-};
-
-static struct resource dma_resources[] = { {
- .name = "dma1",
- .start = 0x0000,
- .end = 0x000f
-}, {
- .name = "dma low page",
- .start = 0x0080,
- .end = 0x008f
-}, {
- .name = "dma2",
- .start = 0x00c0,
- .end = 0x00df
-}, {
- .name = "dma high page",
- .start = 0x0480,
- .end = 0x048f
-} };
-
-static dma_t isa_dma[8];
-
-/*
- * ISA DMA always starts at channel 0
- */
-void __init isa_init_dma(void)
-{
- /*
- * Try to autodetect presence of an ISA DMA controller.
- * We do some minimal initialisation, and check that
- * channel 0's DMA address registers are writeable.
- */
- outb(0xff, 0x0d);
- outb(0xff, 0xda);
-
- /*
- * Write high and low address, and then read them back
- * in the same order.
- */
- outb(0x55, 0x00);
- outb(0xaa, 0x00);
-
- if (inb(0) == 0x55 && inb(0) == 0xaa) {
- unsigned int chan, i;
-
- for (chan = 0; chan < 8; chan++) {
- isa_dma[chan].d_ops = &isa_dma_ops;
- isa_disable_dma(chan, NULL);
- }
-
- outb(0x40, 0x0b);
- outb(0x41, 0x0b);
- outb(0x42, 0x0b);
- outb(0x43, 0x0b);
-
- outb(0xc0, 0xd6);
- outb(0x41, 0xd6);
- outb(0x42, 0xd6);
- outb(0x43, 0xd6);
-
- outb(0, 0xd4);
-
- outb(0x10, 0x08);
- outb(0x10, 0xd0);
-
- /*
- * Is this correct? According to my documentation, it
- * doesn't appear to be. It should be:
- * outb(0x3f, 0x40b); outb(0x3f, 0x4d6);
- */
- outb(0x30, 0x40b);
- outb(0x31, 0x40b);
- outb(0x32, 0x40b);
- outb(0x33, 0x40b);
- outb(0x31, 0x4d6);
- outb(0x32, 0x4d6);
- outb(0x33, 0x4d6);
-
- for (i = 0; i < ARRAY_SIZE(dma_resources); i++)
- request_resource(&ioport_resource, dma_resources + i);
-
- for (chan = 0; chan < 8; chan++) {
- int ret = isa_dma_add(chan, &isa_dma[chan]);
- if (ret)
- pr_err("ISADMA%u: unable to register: %d\n",
- chan, ret);
- }
-
- request_dma(DMA_ISA_CASCADE, "cascade");
- }
-}
diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c
index e651c4d0a0d9..ba15b8666498 100644
--- a/arch/arm/kernel/dma.c
+++ b/arch/arm/kernel/dma.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/kernel/dma.c
*
* Copyright (C) 1995-2000 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Front-end to the DMA handling. This handles the allocation/freeing
* of DMA channels, and provides a unified interface to the machines
* DMA facilities.
@@ -276,21 +273,9 @@ static int proc_dma_show(struct seq_file *m, void *v)
return 0;
}
-static int proc_dma_open(struct inode *inode, struct file *file)
-{
- return single_open(file, proc_dma_show, NULL);
-}
-
-static const struct file_operations proc_dma_operations = {
- .open = proc_dma_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static int __init proc_dma_init(void)
{
- proc_create("dma", 0, NULL, &proc_dma_operations);
+ proc_create_single("dma", 0, NULL, proc_dma_show);
return 0;
}
diff --git a/arch/arm/kernel/early_printk.c b/arch/arm/kernel/early_printk.c
index 43076536965c..03239ca0d5ce 100644
--- a/arch/arm/kernel/early_printk.c
+++ b/arch/arm/kernel/early_printk.c
@@ -1,26 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/kernel/early_printk.c
*
* Copyright (C) 2009 Sascha Hauer <s.hauer@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/console.h>
#include <linux/init.h>
+#include <linux/string.h>
-extern void printch(int);
+extern void printascii(const char *);
static void early_write(const char *s, unsigned n)
{
- while (n-- > 0) {
- if (*s == '\n')
- printch('\r');
- printch(*s);
- s++;
+ char buf[128];
+ while (n) {
+ unsigned l = min(n, sizeof(buf)-1);
+ memcpy(buf, s, l);
+ buf[l] = 0;
+ s += l;
+ n -= l;
+ printascii(buf);
}
}
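
[Editor's note] The rewritten early_write() batches characters through an on-stack buffer so printascii() gets NUL-terminated chunks instead of one call per character. The same loop in isolation (user-space sketch; emit() is an illustrative stand-in for printascii()):

#include <stdio.h>
#include <string.h>

static void emit(const char *s)		/* stand-in for printascii() */
{
	fputs(s, stdout);
}

static void early_write(const char *s, unsigned n)
{
	char buf[128];

	while (n) {
		unsigned l = n < sizeof(buf) - 1 ? n : sizeof(buf) - 1;

		memcpy(buf, s, l);
		buf[l] = 0;		/* printascii() expects a C string */
		s += l;
		n -= l;
		emit(buf);
	}
}

int main(void)
{
	const char msg[] = "hello from the early console\n";

	early_write(msg, sizeof(msg) - 1);
	return 0;
}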
diff --git a/arch/arm/kernel/efi.c b/arch/arm/kernel/efi.c
index 9f43ba012d10..6f9ec7d28a71 100644
--- a/arch/arm/kernel/efi.c
+++ b/arch/arm/kernel/efi.c
@@ -1,18 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/efi.h>
+#include <linux/memblock.h>
+#include <linux/screen_info.h>
+
#include <asm/efi.h>
#include <asm/mach/map.h>
#include <asm/mmu_context.h>
-static int __init set_permissions(pte_t *ptep, pgtable_t token,
- unsigned long addr, void *data)
+static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
{
efi_memory_desc_t *md = data;
pte_t pte = *ptep;
@@ -26,7 +25,8 @@ static int __init set_permissions(pte_t *ptep, pgtable_t token,
}
int __init efi_set_mapping_permissions(struct mm_struct *mm,
- efi_memory_desc_t *md)
+ efi_memory_desc_t *md,
+ bool ignored)
{
unsigned long base, size;
@@ -74,6 +74,57 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
* If stricter permissions were specified, apply them now.
*/
if (md->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))
- return efi_set_mapping_permissions(mm, md);
+ return efi_set_mapping_permissions(mm, md, false);
return 0;
}
+
+static unsigned long __initdata cpu_state_table = EFI_INVALID_TABLE_ADDR;
+
+const efi_config_table_type_t efi_arch_tables[] __initconst = {
+ {LINUX_EFI_ARM_CPU_STATE_TABLE_GUID, &cpu_state_table},
+ {}
+};
+
+static void __init load_cpu_state_table(void)
+{
+ if (cpu_state_table != EFI_INVALID_TABLE_ADDR) {
+ struct efi_arm_entry_state *state;
+ bool dump_state = true;
+
+ state = early_memremap_ro(cpu_state_table,
+ sizeof(struct efi_arm_entry_state));
+ if (state == NULL) {
+ pr_warn("Unable to map CPU entry state table.\n");
+ return;
+ }
+
+ if ((state->sctlr_before_ebs & 1) == 0)
+ pr_warn(FW_BUG "EFI stub was entered with MMU and Dcache disabled, please fix your firmware!\n");
+ else if ((state->sctlr_after_ebs & 1) == 0)
+ pr_warn(FW_BUG "ExitBootServices() returned with MMU and Dcache disabled, please fix your firmware!\n");
+ else
+ dump_state = false;
+
+ if (dump_state || efi_enabled(EFI_DBG)) {
+ pr_info("CPSR at EFI stub entry : 0x%08x\n",
+ state->cpsr_before_ebs);
+ pr_info("SCTLR at EFI stub entry : 0x%08x\n",
+ state->sctlr_before_ebs);
+ pr_info("CPSR after ExitBootServices() : 0x%08x\n",
+ state->cpsr_after_ebs);
+ pr_info("SCTLR after ExitBootServices(): 0x%08x\n",
+ state->sctlr_after_ebs);
+ }
+ early_memunmap(state, sizeof(struct efi_arm_entry_state));
+ }
+}
+
+void __init arm_efi_init(void)
+{
+ efi_init();
+
+ /* ARM does not permit early mappings to persist across paging_init() */
+ efi_memmap_unmap();
+
+ load_cpu_state_table();
+}
diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c
index d0d1e83150c9..254ab7138c85 100644
--- a/arch/arm/kernel/elf.c
+++ b/arch/arm/kernel/elf.c
@@ -1,8 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/elf.h>
+#include <linux/elf-fdpic.h>
#include <asm/system_info.h>
int elf_check_arch(const struct elf32_hdr *x)
@@ -76,16 +78,56 @@ void elf_set_personality(const struct elf32_hdr *x)
EXPORT_SYMBOL(elf_set_personality);
/*
- * Set READ_IMPLIES_EXEC if:
- * - the binary requires an executable stack
- * - we're running on a CPU which doesn't support NX.
+ * An executable for which elf_read_implies_exec() returns TRUE will
+ * have the READ_IMPLIES_EXEC personality flag set automatically.
+ *
+ * The decision process for determining the results are:
+ *
+ *                  CPU: | lacks NX*  | has NX     |
+ * ELF:                  |            |            |
+ * ----------------------|------------|------------|
+ * missing PT_GNU_STACK  | exec-all   | exec-all   |
+ * PT_GNU_STACK == RWX   | exec-all   | exec-stack |
+ * PT_GNU_STACK == RW    | exec-all   | exec-none  |
+ *
+ * exec-all : all PROT_READ user mappings are executable, except when
+ * backed by files on a noexec-filesystem.
+ * exec-none : only PROT_EXEC user mappings are executable.
+ * exec-stack: only the stack and PROT_EXEC user mappings are executable.
+ *
+ * *this column has no architectural effect: NX markings are ignored by
+ * hardware, but may have behavioral effects when "wants X" collides with
+ * "cannot be X" constraints in memory permission flags, as in
+ * https://lkml.kernel.org/r/20190418055759.GA3155@mellanox.com
+ *
*/
-int arm_elf_read_implies_exec(const struct elf32_hdr *x, int executable_stack)
+int arm_elf_read_implies_exec(int executable_stack)
{
- if (executable_stack != EXSTACK_DISABLE_X)
+ if (executable_stack == EXSTACK_DEFAULT)
return 1;
if (cpu_architecture() < CPU_ARCH_ARMv6)
return 1;
return 0;
}
EXPORT_SYMBOL(arm_elf_read_implies_exec);
+
+#if defined(CONFIG_MMU) && defined(CONFIG_BINFMT_ELF_FDPIC)
+
+void elf_fdpic_arch_lay_out_mm(struct elf_fdpic_params *exec_params,
+ struct elf_fdpic_params *interp_params,
+ unsigned long *start_stack,
+ unsigned long *start_brk)
+{
+ elf_set_personality(&exec_params->hdr);
+
+ exec_params->load_addr = 0x8000;
+ interp_params->load_addr = ELF_ET_DYN_BASE;
+ *start_stack = TASK_SIZE - SZ_16M;
+
+ if ((exec_params->flags & ELF_FDPIC_FLAG_ARRANGEMENT) == ELF_FDPIC_FLAG_INDEPENDENT) {
+ exec_params->flags &= ~ELF_FDPIC_FLAG_ARRANGEMENT;
+ exec_params->flags |= ELF_FDPIC_FLAG_CONSTDISP;
+ }
+}
+
+#endif
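
[Editor's note] The decision table in the comment above reduces to a tiny predicate. A user-space restatement (cpu_has_nx parameterizes what cpu_architecture() < CPU_ARCH_ARMv6 checks in the patch; the enum mirrors the EXSTACK_* constants from linux/binfmts.h):

#include <stdio.h>

enum { EXSTACK_DEFAULT, EXSTACK_DISABLE_X, EXSTACK_ENABLE_X };

/* Missing PT_GNU_STACK (EXSTACK_DEFAULT) or a pre-v6 CPU without NX
 * markings both force READ_IMPLIES_EXEC ("exec-all" in the table). */
static int read_implies_exec(int executable_stack, int cpu_has_nx)
{
	if (executable_stack == EXSTACK_DEFAULT)
		return 1;		/* exec-all */
	if (!cpu_has_nx)
		return 1;		/* exec-all: NX unsupported */
	return 0;			/* exec-stack or exec-none */
}

int main(void)
{
	printf("%d %d %d\n",
	       read_implies_exec(EXSTACK_DEFAULT, 1),	/* 1 */
	       read_implies_exec(EXSTACK_ENABLE_X, 1),	/* 0 */
	       read_implies_exec(EXSTACK_DISABLE_X, 1));/* 0 */
	return 0;
}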
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index c731f0d2b2af..ef6a657c8d13 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/kernel/entry-armv.S
*
@@ -5,10 +6,6 @@
* ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
* nommu support by Hyok S. Choi (hyok.choi@samsung.com)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Low-level vector interface routines
*
* Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
@@ -18,44 +15,61 @@
#include <linux/init.h>
#include <asm/assembler.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
-#ifndef CONFIG_MULTI_IRQ_HANDLER
-#include <mach/entry-macro.S>
-#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>
+#include <asm/uaccess-asm.h>
+#include <asm/kasan_def.h>
#include "entry-header.S"
-#include <asm/entry-macro-multi.S>
#include <asm/probes.h>
+#ifdef CONFIG_HAVE_LD_DEAD_CODE_DATA_ELIMINATION
+#define RELOC_TEXT_NONE .reloc .text, R_ARM_NONE, .
+#else
+#define RELOC_TEXT_NONE
+#endif
+
/*
* Interrupt handling.
*/
- .macro irq_handler
-#ifdef CONFIG_MULTI_IRQ_HANDLER
- ldr r1, =handle_arch_irq
- mov r0, sp
- badr lr, 9997f
- ldr pc, [r1]
-#else
- arch_irq_handler_default
+ .macro irq_handler, from_user:req
+ mov r1, sp
+ ldr_this_cpu r2, irq_stack_ptr, r2, r3
+ .if \from_user == 0
+ @
+ @ If we took the interrupt while running in the kernel, we may already
+ @ be using the IRQ stack, so revert to the original value in that case.
+ @
+ subs r3, r2, r1 @ SP above bottom of IRQ stack?
+ rsbscs r3, r3, #THREAD_SIZE @ ... and below the top?
+#ifdef CONFIG_VMAP_STACK
+ ldr_va r3, high_memory, cc @ End of the linear region
+ cmpcc r3, r1 @ Stack pointer was below it?
#endif
-9997:
+ bcc 0f @ If not, switch to the IRQ stack
+ mov r0, r1
+ bl generic_handle_arch_irq
+ b 1f
+0:
+ .endif
+
+ mov_l r0, generic_handle_arch_irq
+ bl call_with_stack
+1:
.endm
.macro pabt_helper
@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
- ldr ip, .LCprocfns
- mov lr, pc
- ldr pc, [ip, #PROCESSOR_PABT_FUNC]
+ ldr_va ip, processor, offset=PROCESSOR_PABT_FUNC
+ bl_r ip
#else
bl CPU_PABORT_HANDLER
#endif
@@ -74,19 +88,14 @@
@ the fault status register in r1. r9 must be preserved.
@
#ifdef MULTI_DABORT
- ldr ip, .LCprocfns
- mov lr, pc
- ldr pc, [ip, #PROCESSOR_DABT_FUNC]
+ ldr_va ip, processor, offset=PROCESSOR_DABT_FUNC
+ bl_r ip
#else
bl CPU_DABORT_HANDLER
#endif
.endm
-#ifdef CONFIG_KPROBES
- .section .kprobes.text,"ax",%progbits
-#else
- .text
-#endif
+ .section .entry.text,"ax",%progbits
/*
* Invalid mode handlers
@@ -149,27 +158,35 @@ ENDPROC(__und_invalid)
#define SPFIX(code...)
#endif
- .macro svc_entry, stack_hole=0, trace=1, uaccess=1
+ .macro svc_entry, stack_hole=0, trace=1, uaccess=1, overflow_check=1
UNWIND(.fnstart )
- UNWIND(.save {r0 - pc} )
- sub sp, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
+ sub sp, sp, #(SVC_REGS_SIZE + \stack_hole)
+ THUMB( add sp, r1 ) @ get SP in a GPR without
+ THUMB( sub r1, sp, r1 ) @ using a temp register
+
+ .if \overflow_check
+ UNWIND(.save {r0 - pc} )
+ do_overflow_check (SVC_REGS_SIZE + \stack_hole)
+ .endif
+
#ifdef CONFIG_THUMB2_KERNEL
- SPFIX( str r0, [sp] ) @ temporarily saved
- SPFIX( mov r0, sp )
- SPFIX( tst r0, #4 ) @ test original stack alignment
- SPFIX( ldr r0, [sp] ) @ restored
+ tst r1, #4 @ test stack pointer alignment
+ sub r1, sp, r1 @ restore original R1
+ sub sp, r1 @ restore original SP
#else
SPFIX( tst sp, #4 )
#endif
- SPFIX( subeq sp, sp, #4 )
- stmia sp, {r1 - r12}
+ SPFIX( subne sp, sp, #4 )
+
+ ARM( stmib sp, {r1 - r12} )
+ THUMB( stmia sp, {r0 - r12} ) @ No STMIB in Thumb-2
ldmia r0, {r3 - r5}
- add r7, sp, #S_SP - 4 @ here for interlock avoidance
+ add r7, sp, #S_SP @ here for interlock avoidance
mov r6, #-1 @ "" "" "" ""
- add r2, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
- SPFIX( addeq r2, r2, #4 )
- str r3, [sp, #-4]! @ save the "real" r0 copied
+ add r2, sp, #(SVC_REGS_SIZE + \stack_hole)
+ SPFIX( addne r2, r2, #4 )
+ str r3, [sp] @ save the "real" r0 copied
@ from the exception stack
mov r3, lr
@@ -186,15 +203,7 @@ ENDPROC(__und_invalid)
stmia r7, {r2 - r6}
get_thread_info tsk
- ldr r0, [tsk, #TI_ADDR_LIMIT]
- mov r1, #TASK_SIZE
- str r1, [tsk, #TI_ADDR_LIMIT]
- str r0, [sp, #SVC_ADDR_LIMIT]
-
- uaccess_save r0
- .if \uaccess
- uaccess_disable r0
- .endif
+ uaccess_entry tsk, r0, r1, r2, \uaccess
.if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -216,9 +225,9 @@ ENDPROC(__dabt_svc)
.align 5
__irq_svc:
svc_entry
- irq_handler
+ irq_handler from_user=0
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
ldr r0, [tsk, #TI_FLAGS] @ get flags
teq r8, #0 @ if preempt count != 0
@@ -233,7 +242,7 @@ ENDPROC(__irq_svc)
.ltorg
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
svc_preempt:
mov r8, lr
1: bl preempt_schedule_irq @ irq en/disable is done inside
@@ -266,31 +275,10 @@ __und_svc:
#else
svc_entry
#endif
- @
- @ call emulation code, which returns using r9 if it has emulated
- @ the instruction, or the more conventional lr if we are to treat
- @ this as a real undefined instruction
- @
- @ r0 - instruction
- @
-#ifndef CONFIG_THUMB2_KERNEL
- ldr r0, [r4, #-4]
-#else
- mov r1, #2
- ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2
- cmp r0, #0xe800 @ 32-bit instruction if xx >= 0
- blo __und_svc_fault
- ldrh r9, [r4] @ bottom 16 bits
- add r4, r4, #2
- str r4, [sp, #S_PC]
- orr r0, r9, r0, lsl #16
-#endif
- badr r9, __und_svc_finish
- mov r2, r4
- bl call_fpe
mov r1, #4 @ PC correction to apply
-__und_svc_fault:
+ THUMB( tst r5, #PSR_T_BIT ) @ exception taken in Thumb mode?
+ THUMB( movne r1, #2 ) @ if so, fix up PC correction
mov r0, sp @ struct pt_regs *regs
bl __und_fault
@@ -319,16 +307,6 @@ __fiq_svc:
UNWIND(.fnend )
ENDPROC(__fiq_svc)
- .align 5
-.LCcralign:
- .word cr_alignment
-#ifdef MULTI_DABORT
-.LCprocfns:
- .word processor
-#endif
-.LCfp:
- .word fp_enter
-
/*
* Abort mode handlers
*/
@@ -387,7 +365,7 @@ ENDPROC(__fiq_abt)
THUMB( stmia sp, {r0 - r12} )
ATRAP( mrc p15, 0, r7, c1, c0, 0)
- ATRAP( ldr r8, .LCcralign)
+ ATRAP( ldr_va r8, cr_alignment)
ldmia r0, {r3 - r5}
add r0, sp, #S_PC @ here for interlock avoidance
@@ -396,8 +374,6 @@ ENDPROC(__fiq_abt)
str r3, [sp] @ save the "real" r0 copied
@ from the exception stack
- ATRAP( ldr r8, [r8, #0])
-
@
@ We are now ready to fill in the remaining blanks on the stack:
@
@@ -419,6 +395,8 @@ ENDPROC(__fiq_abt)
ATRAP( teq r8, r7)
ATRAP( mcrne p15, 0, r8, c1, c0, 0)
+ reload_current r7, r8
+
@
@ Clear FP to mark the first stack frame
@
@@ -441,7 +419,8 @@ ENDPROC(__fiq_abt)
@ if it was interrupted in a critical region. Here we
@ perform a quick test inline since it should be false
@ 99.9999% of the time. The rest is done out of line.
- cmp r4, #TASK_SIZE
+ ldr r0, =TASK_SIZE
+ cmp r4, r0
blhs kuser_cmpxchg64_fixup
#endif
#endif
@@ -461,7 +440,7 @@ ENDPROC(__dabt_usr)
__irq_usr:
usr_entry
kuser_cmpxchg_check
- irq_handler
+ irq_handler from_user=1
get_thread_info tsk
mov why, #0
b ret_to_user_from_irq
@@ -474,274 +453,26 @@ ENDPROC(__irq_usr)
__und_usr:
usr_entry uaccess=0
- mov r2, r4
- mov r3, r5
-
- @ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
- @ faulting instruction depending on Thumb mode.
- @ r3 = regs->ARM_cpsr
- @
- @ The emulation code returns using r9 if it has emulated the
- @ instruction, or the more conventional lr if we are to treat
- @ this as a real undefined instruction
- @
- badr r9, ret_from_exception
-
@ IRQs must be enabled before attempting to read the instruction from
@ user space since that could cause a page/translation fault if the
@ page table was modified by another CPU.
enable_irq
- tst r3, #PSR_T_BIT @ Thumb mode?
- bne __und_usr_thumb
- sub r4, r2, #4 @ ARM instr at LR - 4
-1: ldrt r0, [r4]
- ARM_BE8(rev r0, r0) @ little endian instruction
-
- uaccess_disable ip
-
- @ r0 = 32-bit ARM instruction which caused the exception
- @ r2 = PC value for the following instruction (:= regs->ARM_pc)
- @ r4 = PC value for the faulting instruction
- @ lr = 32-bit undefined instruction function
- badr lr, __und_usr_fault_32
- b call_fpe
-
-__und_usr_thumb:
- @ Thumb instruction
- sub r4, r2, #2 @ First half of thumb instr at LR - 2
-#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
-/*
- * Thumb-2 instruction handling. Note that because pre-v6 and >= v6 platforms
- * can never be supported in a single kernel, this code is not applicable at
- * all when __LINUX_ARM_ARCH__ < 6. This allows simplifying assumptions to be
- * made about .arch directives.
- */
-#if __LINUX_ARM_ARCH__ < 7
-/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
-#define NEED_CPU_ARCHITECTURE
- ldr r5, .LCcpu_architecture
- ldr r5, [r5]
- cmp r5, #CPU_ARCH_ARMv7
- blo __und_usr_fault_16 @ 16bit undefined instruction
-/*
- * The following code won't get run unless the running CPU really is v7, so
- * coding round the lack of ldrht on older arches is pointless. Temporarily
- * override the assembler target arch with the minimum required instead:
- */
- .arch armv6t2
+ tst r5, #PSR_T_BIT @ Thumb mode?
+ mov r1, #2 @ set insn size to 2 for Thumb
+ bne 0f @ handle as Thumb undef exception
+#ifdef CONFIG_FPE_NWFPE
+ adr r9, ret_from_exception
+ bl call_fpe @ returns via R9 on success
#endif
-2: ldrht r5, [r4]
-ARM_BE8(rev16 r5, r5) @ little endian instruction
- cmp r5, #0xe800 @ 32bit instruction if xx != 0
- blo __und_usr_fault_16_pan @ 16bit undefined instruction
-3: ldrht r0, [r2]
-ARM_BE8(rev16 r0, r0) @ little endian instruction
+ mov r1, #4 @ set insn size to 4 for ARM
+0: mov r0, sp
uaccess_disable ip
- add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
- str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
- orr r0, r0, r5, lsl #16
- badr lr, __und_usr_fault_32
- @ r0 = the two 16-bit Thumb instructions which caused the exception
- @ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
- @ r4 = PC value for the first 16-bit Thumb instruction
- @ lr = 32bit undefined instruction function
-
-#if __LINUX_ARM_ARCH__ < 7
-/* If the target arch was overridden, change it back: */
-#ifdef CONFIG_CPU_32v6K
- .arch armv6k
-#else
- .arch armv6
-#endif
-#endif /* __LINUX_ARM_ARCH__ < 7 */
-#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
- b __und_usr_fault_16
-#endif
+ bl __und_fault
+ b ret_from_exception
UNWIND(.fnend)
ENDPROC(__und_usr)
-/*
- * The out of line fixup for the ldrt instructions above.
- */
- .pushsection .text.fixup, "ax"
- .align 2
-4: str r4, [sp, #S_PC] @ retry current instruction
- ret r9
- .popsection
- .pushsection __ex_table,"a"
- .long 1b, 4b
-#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
- .long 2b, 4b
- .long 3b, 4b
-#endif
- .popsection
-
-/*
- * Check whether the instruction is a co-processor instruction.
- * If yes, we need to call the relevant co-processor handler.
- *
- * Note that we don't do a full check here for the co-processor
- * instructions; all instructions with bit 27 set are well
- * defined. The only instructions that should fault are the
- * co-processor instructions. However, we have to watch out
- * for the ARM6/ARM7 SWI bug.
- *
- * NEON is a special case that has to be handled here. Not all
- * NEON instructions are co-processor instructions, so we have
- * to make a special case of checking for them. Plus, there are
- * five groups of them, so we have a table of mask/opcode pairs
- * to check against, and if any match then we branch off into the
- * NEON handler code.
- *
- * Emulators may wish to make use of the following registers:
- * r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
- * r2 = PC value to resume execution after successful emulation
- * r9 = normal "successful" return address
- * r10 = this thread's thread_info structure
- * lr = unrecognised instruction return address
- * IRQs enabled, FIQs enabled.
- */
- @
- @ Fall-through from Thumb-2 __und_usr
- @
-#ifdef CONFIG_NEON
- get_thread_info r10 @ get current thread
- adr r6, .LCneon_thumb_opcodes
- b 2f
-#endif
-call_fpe:
- get_thread_info r10 @ get current thread
-#ifdef CONFIG_NEON
- adr r6, .LCneon_arm_opcodes
-2: ldr r5, [r6], #4 @ mask value
- ldr r7, [r6], #4 @ opcode bits matching in mask
- cmp r5, #0 @ end mask?
- beq 1f
- and r8, r0, r5
- cmp r8, r7 @ NEON instruction?
- bne 2b
- mov r7, #1
- strb r7, [r10, #TI_USED_CP + 10] @ mark CP#10 as used
- strb r7, [r10, #TI_USED_CP + 11] @ mark CP#11 as used
- b do_vfp @ let VFP handler handle this
-1:
-#endif
- tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27
- tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2
- reteq lr
- and r8, r0, #0x00000f00 @ mask out CP number
- THUMB( lsr r8, r8, #8 )
- mov r7, #1
- add r6, r10, #TI_USED_CP
- ARM( strb r7, [r6, r8, lsr #8] ) @ set appropriate used_cp[]
- THUMB( strb r7, [r6, r8] ) @ set appropriate used_cp[]
-#ifdef CONFIG_IWMMXT
- @ Test if we need to give access to iWMMXt coprocessors
- ldr r5, [r10, #TI_FLAGS]
- rsbs r7, r8, #(1 << 8) @ CP 0 or 1 only
- movcss r7, r5, lsr #(TIF_USING_IWMMXT + 1)
- bcs iwmmxt_task_enable
-#endif
- ARM( add pc, pc, r8, lsr #6 )
- THUMB( lsl r8, r8, #2 )
- THUMB( add pc, r8 )
- nop
-
- ret.w lr @ CP#0
- W(b) do_fpe @ CP#1 (FPE)
- W(b) do_fpe @ CP#2 (FPE)
- ret.w lr @ CP#3
-#ifdef CONFIG_CRUNCH
- b crunch_task_enable @ CP#4 (MaverickCrunch)
- b crunch_task_enable @ CP#5 (MaverickCrunch)
- b crunch_task_enable @ CP#6 (MaverickCrunch)
-#else
- ret.w lr @ CP#4
- ret.w lr @ CP#5
- ret.w lr @ CP#6
-#endif
- ret.w lr @ CP#7
- ret.w lr @ CP#8
- ret.w lr @ CP#9
-#ifdef CONFIG_VFP
- W(b) do_vfp @ CP#10 (VFP)
- W(b) do_vfp @ CP#11 (VFP)
-#else
- ret.w lr @ CP#10 (VFP)
- ret.w lr @ CP#11 (VFP)
-#endif
- ret.w lr @ CP#12
- ret.w lr @ CP#13
- ret.w lr @ CP#14 (Debug)
- ret.w lr @ CP#15 (Control)
-
-#ifdef NEED_CPU_ARCHITECTURE
- .align 2
-.LCcpu_architecture:
- .word __cpu_architecture
-#endif
-
-#ifdef CONFIG_NEON
- .align 6
-
-.LCneon_arm_opcodes:
- .word 0xfe000000 @ mask
- .word 0xf2000000 @ opcode
-
- .word 0xff100000 @ mask
- .word 0xf4000000 @ opcode
-
- .word 0x00000000 @ mask
- .word 0x00000000 @ opcode
-
-.LCneon_thumb_opcodes:
- .word 0xef000000 @ mask
- .word 0xef000000 @ opcode
-
- .word 0xff100000 @ mask
- .word 0xf9000000 @ opcode
-
- .word 0x00000000 @ mask
- .word 0x00000000 @ opcode
-#endif
-
-do_fpe:
- ldr r4, .LCfp
- add r10, r10, #TI_FPSTATE @ r10 = workspace
- ldr pc, [r4] @ Call FP module USR entry point
-
-/*
- * The FP module is called with these registers set:
- * r0 = instruction
- * r2 = PC+4
- * r9 = normal "successful" return address
- * r10 = FP workspace
- * lr = unrecognised FP instruction return address
- */
-
- .pushsection .data
-ENTRY(fp_enter)
- .word no_fp
- .popsection
-
-ENTRY(no_fp)
- ret lr
-ENDPROC(no_fp)
-
-__und_usr_fault_32:
- mov r1, #4
- b 1f
-__und_usr_fault_16_pan:
- uaccess_disable ip
-__und_usr_fault_16:
- mov r1, #2
-1: mov r0, sp
- badr lr, ret_from_exception
- b __und_fault
-ENDPROC(__und_usr_fault_32)
-ENDPROC(__und_usr_fault_16)
-
.align 5
__pabt_usr:
usr_entry
@@ -794,14 +525,17 @@ ENTRY(__switch_to)
ldr r6, [r2, #TI_CPU_DOMAIN]
#endif
switch_tls r1, r4, r5, r3, r7
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
- ldr r7, [r2, #TI_TASK]
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
+ !defined(CONFIG_STACKPROTECTOR_PER_TASK)
ldr r8, =__stack_chk_guard
.if (TSK_STACK_CANARY > IMM12_MASK)
- add r7, r7, #TSK_STACK_CANARY & ~IMM12_MASK
+ add r9, r2, #TSK_STACK_CANARY & ~IMM12_MASK
+ ldr r9, [r9, #TSK_STACK_CANARY & IMM12_MASK]
+ .else
+ ldr r9, [r2, #TSK_STACK_CANARY & IMM12_MASK]
.endif
- ldr r7, [r7, #TSK_STACK_CANARY & IMM12_MASK]
#endif
+ mov r7, r2 @ Preserve 'next'
#ifdef CONFIG_CPU_USE_DOMAINS
mcr p15, 0, r6, c3, c0, 0 @ Set domain register
#endif
@@ -810,18 +544,109 @@ ENTRY(__switch_to)
ldr r0, =thread_notify_head
mov r1, #THREAD_NOTIFY_SWITCH
bl atomic_notifier_call_chain
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
- str r7, [r8]
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
+ !defined(CONFIG_STACKPROTECTOR_PER_TASK)
+ str r9, [r8]
#endif
- THUMB( mov ip, r4 )
mov r0, r5
- ARM( ldmia r4, {r4 - sl, fp, sp, pc} ) @ Load all regs saved previously
- THUMB( ldmia ip!, {r4 - sl, fp} ) @ Load all regs saved previously
- THUMB( ldr sp, [ip], #4 )
- THUMB( ldr pc, [ip] )
+#if !defined(CONFIG_THUMB2_KERNEL) && !defined(CONFIG_VMAP_STACK)
+ set_current r7, r8
+ ldmia r4, {r4 - sl, fp, sp, pc} @ Load all regs saved previously
+#else
+ mov r1, r7
+ ldmia r4, {r4 - sl, fp, ip, lr} @ Load all regs saved previously
+#ifdef CONFIG_VMAP_STACK
+ @
+ @ Do a dummy read from the new stack while running from the old one so
+ @ that we can rely on do_translation_fault() to fix up any stale PMD
+ @ entries covering the vmalloc region.
+ @
+ ldr r2, [ip]
+#ifdef CONFIG_KASAN_VMALLOC
+ @ Also do a dummy read from the KASAN shadow memory for the new stack
+ @ if we are using KASAN
+ mov_l r2, KASAN_SHADOW_OFFSET
+ add r2, r2, ip, lsr #KASAN_SHADOW_SCALE_SHIFT
+ ldr r2, [r2]
+#endif
+#endif
+
+ @ When CONFIG_THREAD_INFO_IN_TASK=n, the update of SP itself is what
+ @ effectuates the task switch, as that is what causes the observable
+ @ values of current and current_thread_info to change. When
+ @ CONFIG_THREAD_INFO_IN_TASK=y, setting current (and therefore
+ @ current_thread_info) is done explicitly, and the update of SP just
+ @ switches us to another stack, with few other side effects. In order
+ @ to prevent this distinction from causing any inconsistencies, let's
+ @ keep the 'set_current' call as close as we can to the update of SP.
+ set_current r1, r2
+ mov sp, ip
+ ret lr
+#endif
UNWIND(.fnend )
ENDPROC(__switch_to)
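
A C sketch of the distinction described in the comment above, assuming the classic THREAD_SIZE-aligned stacks used when CONFIG_THREAD_INFO_IN_TASK=n:

	/*
	 * Sketch: with thread_info on the stack, 'current' is recovered by
	 * masking SP, so moving SP is itself the task switch; with
	 * THREAD_INFO_IN_TASK, set_current has to update the record explicitly.
	 */
	static inline struct thread_info *ti_from_sp(unsigned long sp)
	{
		return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
	}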
+#ifdef CONFIG_VMAP_STACK
+ .text
+ .align 2
+__bad_stack:
+ @
+ @ We've just detected an overflow. We need to load the address of this
+ @ CPU's overflow stack into the stack pointer register. We have only one
+ @ scratch register so let's use a sequence of ADDs including one
+ @ involving the PC, and decorate them with PC-relative group
+ @ relocations. As these are ARM only, switch to ARM mode first.
+ @
+ @ We enter here with IP clobbered and its value stashed on the mode
+ @ stack.
+ @
+THUMB( bx pc )
+THUMB( nop )
+THUMB( .arm )
+ ldr_this_cpu_armv6 ip, overflow_stack_ptr
+
+ str sp, [ip, #-4]! @ Preserve original SP value
+ mov sp, ip @ Switch to overflow stack
+ pop {ip} @ Original SP in IP
+
+#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
+ mov ip, ip @ mov expected by unwinder
+ push {fp, ip, lr, pc} @ GCC flavor frame record
+#else
+ str ip, [sp, #-8]! @ store original SP
+ push {fpreg, lr} @ Clang flavor frame record
+#endif
+UNWIND( ldr ip, [r0, #4] ) @ load exception LR
+UNWIND( str ip, [sp, #12] ) @ store in the frame record
+ ldr ip, [r0, #12] @ reload IP
+
+ @ Store the original GPRs to the new stack.
+ svc_entry uaccess=0, overflow_check=0
+
+UNWIND( .save {sp, pc} )
+UNWIND( .save {fpreg, lr} )
+UNWIND( .setfp fpreg, sp )
+
+ ldr fpreg, [sp, #S_SP] @ Add our frame record
+ @ to the linked list
+#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
+ ldr r1, [fp, #4] @ reload SP at entry
+ add fp, fp, #12
+#else
+ ldr r1, [fpreg, #8]
+#endif
+ str r1, [sp, #S_SP] @ store in pt_regs
+
+ @ Stash the regs for handle_bad_stack
+ mov r0, sp
+
+ @ Time to die
+ bl handle_bad_stack
+ nop
+UNWIND( .fnend )
+ENDPROC(__bad_stack)
+#endif
+
__INIT
/*
@@ -832,7 +657,7 @@ ENDPROC(__switch_to)
* existing ones. This mechanism should be used only for things that are
* really small and justified, and not be abused freely.
*
- * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
+ * See Documentation/arch/arm/kernel_user_helpers.rst for formal definitions.
*/
THUMB( .arm )
@@ -875,7 +700,7 @@ __kuser_cmpxchg64: @ 0xffff0f60
smp_dmb arm
1: ldrexd r0, r1, [r2] @ load current val
eors r3, r0, r4 @ compare with oldval (1)
- eoreqs r3, r1, r5 @ compare with oldval (2)
+ eorseq r3, r1, r5 @ compare with oldval (2)
strexdeq r3, r6, r7, [r2] @ store newval if eq
teqeq r3, #1 @ success?
beq 1b @ if no then retry
@@ -899,8 +724,8 @@ __kuser_cmpxchg64: @ 0xffff0f60
ldmia r1, {r6, lr} @ load new val
1: ldmia r2, {r0, r1} @ load current val
eors r3, r0, r4 @ compare with oldval (1)
- eoreqs r3, r1, r5 @ compare with oldval (2)
-2: stmeqia r2, {r6, lr} @ store newval if eq
+ eorseq r3, r1, r5 @ compare with oldval (2)
+2: stmiaeq r2, {r6, lr} @ store newval if eq
rsbs r0, r3, #0 @ set return val and C flag
ldmfd sp!, {r4, r5, r6, pc}
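
Pseudo-C for the contract of the helper above (a sketch; load_exclusive64() and store_exclusive64() are hypothetical stand-ins for LDREXD/STREXD): return 0 with the C flag set on success, non-zero with C clear on mismatch, and retry whenever the exclusive monitor is lost.

	int kuser_cmpxchg64(u64 *ptr, u64 oldval, u64 newval)
	{
		do {
			if (load_exclusive64(ptr) != oldval)
				return -1;			/* mismatch: fail */
		} while (store_exclusive64(ptr, newval));	/* non-zero: monitor lost, retry */
		return 0;					/* stored atomically */
	}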
@@ -914,7 +739,7 @@ kuser_cmpxchg64_fixup:
mov r7, #0xffff0fff
sub r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
subs r8, r4, r7
- rsbcss r8, r8, #(2b - 1b)
+ rsbscs r8, r8, #(2b - 1b)
strcs r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
bcc kuser_cmpxchg32_fixup
@@ -972,7 +797,7 @@ kuser_cmpxchg32_fixup:
mov r7, #0xffff0fff
sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
subs r8, r4, r7
- rsbcss r8, r8, #(2b - 1b)
+ rsbscs r8, r8, #(2b - 1b)
strcs r7, [sp, #S_PC]
ret lr
.previous
@@ -1035,17 +860,23 @@ __kuser_helper_end:
*/
.macro vector_stub, name, mode, correction=0
.align 5
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+vector_bhb_bpiall_\name:
+ mcr p15, 0, r0, c7, c5, 6 @ BPIALL
+ @ isb not needed due to "movs pc, lr" in the vector stub
+ @ which gives a "context synchronisation".
+#endif
vector_\name:
.if \correction
sub lr, lr, #\correction
.endif
- @
- @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
- @ (parent CPSR)
- @
+ @ Save r0, lr_<exception> (parent PC)
stmia sp, {r0, lr} @ save r0, lr
+
+ @ Save spsr_<exception> (parent CPSR)
+.Lvec_\name:
mrs lr, spsr
str lr, [sp, #8] @ save spsr
@@ -1067,14 +898,47 @@ vector_\name:
movs pc, lr @ branch to handler in SVC mode
ENDPROC(vector_\name)
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+ .subsection 1
+ .align 5
+vector_bhb_loop8_\name:
+ .if \correction
+ sub lr, lr, #\correction
+ .endif
+
+ @ Save r0, lr_<exception> (parent PC)
+ stmia sp, {r0, lr}
+
+ @ bhb workaround
+ mov r0, #8
+3: W(b) . + 4
+ subs r0, r0, #1
+ bne 3b
+ dsb nsh
+ @ isb not needed due to "movs pc, lr" in the vector stub
+ @ which gives a "context synchronisation".
+ b .Lvec_\name
+ENDPROC(vector_bhb_loop8_\name)
+ .previous
+#endif
+
.align 2
@ handler addresses follow this label
1:
.endm
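
The eight-iteration loop of taken branches in the loop8 stub above is the Spectre-BHB mitigation: on affected cores, eight taken branches displace attacker-trained entries from the branch history buffer before the kernel takes any further predicted branches. A rough sketch of the intent (taken_branch() and dsb_nsh() are hypothetical stand-ins):

	for (int i = 8; i > 0; i--)
		taken_branch();	/* one taken branch per iteration, like 'b . + 4' */
	dsb_nsh();		/* keep the stuffing ordered before what follows */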
.section .stubs, "ax", %progbits
- @ This must be the first word
+ @ These need to remain at the start of the section so that
+ @ they are in range of the 'SWI' entries in the vector tables
+ @ located 4k down.
+.L__vector_swi:
.word vector_swi
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+.L__vector_bhb_loop8_swi:
+ .word vector_bhb_loop8_swi
+.L__vector_bhb_bpiall_swi:
+ .word vector_bhb_bpiall_swi
+#endif
vector_rst:
ARM( swi SYS_ERROR0 )
@@ -1189,8 +1053,10 @@ vector_addrexcptn:
* FIQ "NMI" handler
*-----------------------------------------------------------------------------
* Handle a FIQ using the SVC stack allowing FIQ act like NMI on x86
- * systems.
+ * systems. This must be the last vector stub, so let's place it in its own
+ * subsection.
*/
+ .subsection 2
vector_stub fiq, FIQ_MODE, 4
.long __fiq_usr @ 0 (USR_26 / USR_32)
@@ -1213,24 +1079,49 @@ vector_addrexcptn:
.globl vector_fiq
.section .vectors, "ax", %progbits
-.L__vectors_start:
+ RELOC_TEXT_NONE
W(b) vector_rst
W(b) vector_und
- W(ldr) pc, .L__vectors_start + 0x1000
+ARM( .reloc ., R_ARM_LDR_PC_G0, .L__vector_swi )
+THUMB( .reloc ., R_ARM_THM_PC12, .L__vector_swi )
+ W(ldr) pc, .
W(b) vector_pabt
W(b) vector_dabt
W(b) vector_addrexcptn
W(b) vector_irq
W(b) vector_fiq
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+ .section .vectors.bhb.loop8, "ax", %progbits
+ RELOC_TEXT_NONE
+ W(b) vector_rst
+ W(b) vector_bhb_loop8_und
+ARM( .reloc ., R_ARM_LDR_PC_G0, .L__vector_bhb_loop8_swi )
+THUMB( .reloc ., R_ARM_THM_PC12, .L__vector_bhb_loop8_swi )
+ W(ldr) pc, .
+ W(b) vector_bhb_loop8_pabt
+ W(b) vector_bhb_loop8_dabt
+ W(b) vector_addrexcptn
+ W(b) vector_bhb_loop8_irq
+ W(b) vector_bhb_loop8_fiq
+
+ .section .vectors.bhb.bpiall, "ax", %progbits
+ RELOC_TEXT_NONE
+ W(b) vector_rst
+ W(b) vector_bhb_bpiall_und
+ARM( .reloc ., R_ARM_LDR_PC_G0, .L__vector_bhb_bpiall_swi )
+THUMB( .reloc ., R_ARM_THM_PC12, .L__vector_bhb_bpiall_swi )
+ W(ldr) pc, .
+ W(b) vector_bhb_bpiall_pabt
+ W(b) vector_bhb_bpiall_dabt
+ W(b) vector_addrexcptn
+ W(b) vector_bhb_bpiall_irq
+ W(b) vector_bhb_bpiall_fiq
+#endif
+
.data
+ .align 2
.globl cr_alignment
cr_alignment:
.space 4
-
-#ifdef CONFIG_MULTI_IRQ_HANDLER
- .globl handle_arch_irq
-handle_arch_irq:
- .space 4
-#endif
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index eb5cd77bf1d8..88336a1292bb 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -1,52 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/kernel/entry-common.S
*
* Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <asm/assembler.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
+#include <asm/page.h>
#ifdef CONFIG_AEABI
#include <asm/unistd-oabi.h>
#endif
.equ NR_syscalls, __NR_syscalls
-#ifdef CONFIG_NEED_RET_TO_USER
-#include <mach/entry-macro.S>
-#else
- .macro arch_ret_to_user, tmp1, tmp2
- .endm
-#endif
-
#include "entry-header.S"
+saved_psr .req r8
+#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING_USER)
+saved_pc .req r9
+#define TRACE(x...) x
+#else
+saved_pc .req lr
+#define TRACE(x...)
+#endif
+ .section .entry.text,"ax",%progbits
.align 5
-#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
+#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING_USER) || \
+ IS_ENABLED(CONFIG_DEBUG_RSEQ))
/*
* This is the fast syscall return path. We do as little as possible here,
* such as avoiding writing r0 to the stack. We only use this path if we
- * have tracing and context tracking disabled - the overheads from those
- * features make this path too inefficient.
+ * have tracing, context tracking and rseq debug disabled - the overheads
+ * from those features make this path too inefficient.
*/
ret_fast_syscall:
+__ret_fast_syscall:
UNWIND(.fnstart )
UNWIND(.cantunwind )
disable_irq_notrace @ disable interrupts
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+ movs r1, r1, lsl #16
bne fast_work_pending
- /* perform architecture specific actions before user return */
- arch_ret_to_user r1, lr
-
restore_user_regs fast = 1, offset = S_OFF
UNWIND(.fnend )
ENDPROC(ret_fast_syscall)
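
The 'movs r1, r1, lsl #16' that replaces the old 'tst' relies on every TIF flag that needs servicing before returning to user space living in bits 0-15 of thread_info::flags: shifting those bits into the top halfword sets Z exactly when no work is pending. In C terms (a sketch, under that bit-layout assumption):

	/* sketch: the branch is taken iff any work bit in bits 0..15 is set */
	if ((u32)(ti_flags << 16) != 0)
		goto fast_work_pending;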
@@ -57,17 +56,24 @@ fast_work_pending:
/* fall through to work_pending */
#else
/*
- * The "replacement" ret_fast_syscall for when tracing or context tracking
- * is enabled. As we will need to call out to some C functions, we save
- * r0 first to avoid needing to save registers around each C function call.
+ * The "replacement" ret_fast_syscall for when tracing, context tracking,
+ * or rseq debug is enabled. As we will need to call out to some C functions,
+ * we save r0 first to avoid needing to save registers around each C function
+ * call.
*/
ret_fast_syscall:
+__ret_fast_syscall:
UNWIND(.fnstart )
UNWIND(.cantunwind )
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
+#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
+ /* do_rseq_syscall needs interrupts enabled. */
+ mov r0, sp @ 'regs'
+ bl do_rseq_syscall
+#endif
disable_irq_notrace @ disable interrupts
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+ movs r1, r1, lsl #16
beq no_work_pending
UNWIND(.fnend )
ENDPROC(ret_fast_syscall)
@@ -84,6 +90,7 @@ slow_work_pending:
cmp r0, #0
beq no_work_pending
movlt scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
+ str scno, [tsk, #TI_ABI_SYSCALL] @ make sure tracers see update
ldmia sp, {r0 - r6} @ have to reload r0 - r6
b local_restart @ ... and off we go
ENDPROC(ret_fast_syscall)
@@ -96,18 +103,25 @@ ENDPROC(ret_fast_syscall)
*/
ENTRY(ret_to_user)
ret_slow_syscall:
+#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
+ /* do_rseq_syscall needs interrupts enabled. */
+ enable_irq_notrace @ enable interrupts
+ mov r0, sp @ 'regs'
+ bl do_rseq_syscall
+#endif
disable_irq_notrace @ disable interrupts
ENTRY(ret_to_user_from_irq)
ldr r1, [tsk, #TI_FLAGS]
- tst r1, #_TIF_WORK_MASK
+ movs r1, r1, lsl #16
bne slow_work_pending
no_work_pending:
asm_trace_hardirqs_on save = 0
- /* perform architecture specific actions before user return */
- arch_ret_to_user r1, lr
ct_user_enter save = 0
+#ifdef CONFIG_KSTACK_ERASE
+ bl stackleak_erase_on_task_stack
+#endif
restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)
@@ -131,26 +145,52 @@ ENDPROC(ret_from_fork)
*/
.align 5
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+ENTRY(vector_bhb_loop8_swi)
+ sub sp, sp, #PT_REGS_SIZE
+ stmia sp, {r0 - r12}
+ mov r8, #8
+1: b 2f
+2: subs r8, r8, #1
+ bne 1b
+ dsb nsh
+ isb
+ b 3f
+ENDPROC(vector_bhb_loop8_swi)
+
+ .align 5
+ENTRY(vector_bhb_bpiall_swi)
+ sub sp, sp, #PT_REGS_SIZE
+ stmia sp, {r0 - r12}
+ mcr p15, 0, r8, c7, c5, 6 @ BPIALL
+ isb
+ b 3f
+ENDPROC(vector_bhb_bpiall_swi)
+#endif
+ .align 5
ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
v7m_exception_entry
#else
sub sp, sp, #PT_REGS_SIZE
stmia sp, {r0 - r12} @ Calling r0 - r12
+3:
ARM( add r8, sp, #S_PC )
ARM( stmdb r8, {sp, lr}^ ) @ Calling sp, lr
THUMB( mov r8, sp )
THUMB( store_user_sp_lr r8, r10, S_SP ) @ calling sp, lr
- mrs r8, spsr @ called from non-FIQ mode, so ok.
- str lr, [sp, #S_PC] @ Save calling PC
- str r8, [sp, #S_PSR] @ Save CPSR
+ mrs saved_psr, spsr @ called from non-FIQ mode, so ok.
+ TRACE( mov saved_pc, lr )
+ str saved_pc, [sp, #S_PC] @ Save calling PC
+ str saved_psr, [sp, #S_PSR] @ Save CPSR
str r0, [sp, #S_OLD_R0] @ Save OLD_R0
#endif
+ reload_current r10, ip
zero_fp
- alignment_trap r10, ip, __cr_alignment
- enable_irq
- ct_user_exit
- get_thread_info tsk
+ alignment_trap r10, ip, cr_alignment
+ asm_trace_hardirqs_on save=0
+ enable_irq_notrace
+ ct_user_exit save=0
/*
* Get the system call number.
@@ -163,11 +203,11 @@ ENTRY(vector_swi)
* value to determine if it is an EABI or an old ABI call.
*/
#ifdef CONFIG_ARM_THUMB
- tst r8, #PSR_T_BIT
+ tst saved_psr, #PSR_T_BIT
movne r10, #0 @ no thumb OABI emulation
- USER( ldreq r10, [lr, #-4] ) @ get SWI instruction
+ USER( ldreq r10, [saved_pc, #-4] ) @ get SWI instruction
#else
- USER( ldr r10, [lr, #-4] ) @ get SWI instruction
+ USER( ldr r10, [saved_pc, #-4] ) @ get SWI instruction
#endif
ARM_BE8(rev r10, r10) @ little endian instruction
@@ -178,16 +218,19 @@ ENTRY(vector_swi)
*/
#elif defined(CONFIG_ARM_THUMB)
/* Legacy ABI only, possibly thumb mode. */
- tst r8, #PSR_T_BIT @ this is SPSR from save_user_regs
+ tst saved_psr, #PSR_T_BIT @ this is SPSR from save_user_regs
addne scno, r7, #__NR_SYSCALL_BASE @ put OS number in
- USER( ldreq scno, [lr, #-4] )
+ USER( ldreq scno, [saved_pc, #-4] )
#else
/* Legacy ABI only. */
- USER( ldr scno, [lr, #-4] ) @ get SWI instruction
+ USER( ldr scno, [saved_pc, #-4] ) @ get SWI instruction
#endif
+ /* saved_psr and saved_pc are now dead */
+
uaccess_disable tbl
+ get_thread_info tsk
adr tbl, sys_call_table @ load syscall table pointer
@@ -199,12 +242,22 @@ ENTRY(vector_swi)
* get the old ABI syscall table address.
*/
bics r10, r10, #0xff000000
+ strne r10, [tsk, #TI_ABI_SYSCALL]
+ streq scno, [tsk, #TI_ABI_SYSCALL]
eorne scno, r10, #__NR_OABI_SYSCALL_BASE
ldrne tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
bic scno, scno, #0xff000000 @ mask off SWI op-code
+ str scno, [tsk, #TI_ABI_SYSCALL]
eor scno, scno, #__NR_SYSCALL_BASE @ check OS number
+#else
+ str scno, [tsk, #TI_ABI_SYSCALL]
#endif
+ /*
+ * Reload the registers that may have been corrupted on entry to
+ * the syscall assembly (by tracing or context tracking).
+ */
+ TRACE( ldmia sp, {r0 - r3} )
local_restart:
ldr r10, [tsk, #TI_FLAGS] @ check for syscall tracing
@@ -213,9 +266,7 @@ local_restart:
tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
bne __sys_trace
- cmp scno, #NR_syscalls @ check upper syscall limit
- badr lr, ret_fast_syscall @ return address
- ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
+ invoke_syscall tbl, scno, r10, __ret_fast_syscall
add r1, sp, #S_OFF
2: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
@@ -234,53 +285,39 @@ local_restart:
* current task.
*/
9001:
- sub lr, lr, #4
+ sub lr, saved_pc, #4
str lr, [sp, #S_PC]
+ get_thread_info tsk
b ret_fast_syscall
#endif
ENDPROC(vector_swi)
+ .ltorg
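
For orientation, a C sketch (not kernel code) of the number/table selection performed above on a CONFIG_OABI_COMPAT kernel: OABI callers encode the syscall number in the SWI instruction's 24-bit immediate, while EABI callers issue 'swi 0' and pass the number in r7.

	unsigned int imm = swi_insn & 0x00ffffff;	/* hypothetical decode */
	if (imm != 0) {					/* OABI: number in the instruction */
		scno = imm ^ __NR_OABI_SYSCALL_BASE;
		tbl  = sys_oabi_call_table;
	} else {					/* EABI: number passed in r7 */
		scno = regs->ARM_r7;
		tbl  = sys_call_table;
	}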
/*
* This is the really slow path. We're going to be doing
* context switches, and waiting for our parent to respond.
*/
__sys_trace:
- mov r1, scno
add r0, sp, #S_OFF
bl syscall_trace_enter
-
- badr lr, __sys_trace_return @ return address
- mov scno, r0 @ syscall number (possibly new)
- add r1, sp, #S_R0 + S_OFF @ pointer to regs
- cmp scno, #NR_syscalls @ check upper syscall limit
- ldmccia r1, {r0 - r6} @ have to reload r0 - r6
- stmccia sp, {r4, r5} @ and update the stack args
- ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
+ mov scno, r0
+ invoke_syscall tbl, scno, r10, __sys_trace_return, reload=1
cmp scno, #-1 @ skip the syscall?
bne 2b
add sp, sp, #S_OFF @ restore stack
- b ret_slow_syscall
-__sys_trace_return:
- str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
+__sys_trace_return_nosave:
+ enable_irq_notrace
mov r0, sp
bl syscall_trace_exit
b ret_slow_syscall
-__sys_trace_return_nosave:
- enable_irq_notrace
+__sys_trace_return:
+ str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
mov r0, sp
bl syscall_trace_exit
b ret_slow_syscall
- .align 5
-#ifdef CONFIG_ALIGNMENT_TRAP
- .type __cr_alignment, #object
-__cr_alignment:
- .word cr_alignment
-#endif
- .ltorg
-
.macro syscall_table_start, sym
.equ __sys_nr, 0
.type \sym, #object
@@ -308,20 +345,19 @@ ENTRY(\sym)
.size \sym, . - \sym
.endm
-#define NATIVE(nr, func) syscall nr, func
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
+#define __SYSCALL(nr, func) syscall nr, func
/*
* This is the syscall table declaration for native ABI syscalls.
* With EABI a couple syscalls are obsolete and defined as sys_ni_syscall.
*/
syscall_table_start sys_call_table
-#define COMPAT(nr, native, compat) syscall nr, native
#ifdef CONFIG_AEABI
#include <calls-eabi.S>
#else
#include <calls-oabi.S>
#endif
-#undef COMPAT
syscall_table_end sys_call_table
/*============================================================================
@@ -333,7 +369,11 @@ sys_syscall:
bic scno, r0, #__NR_OABI_SYSCALL_BASE
cmp scno, #__NR_syscall - __NR_SYSCALL_BASE
cmpne scno, #NR_syscalls @ check range
- stmloia sp, {r5, r6} @ shuffle args
+#ifdef CONFIG_CPU_SPECTRE
+ movhs scno, #0
+ csdb
+#endif
+ stmialo sp, {r5, r6} @ shuffle args
movlo r0, r1
movlo r1, r2
movlo r2, r3
@@ -371,17 +411,8 @@ ENDPROC(sys_fstatfs64_wrapper)
* offset, we return EINVAL.
*/
sys_mmap2:
-#if PAGE_SHIFT > 12
- tst r5, #PGOFF_MASK
- moveq r5, r5, lsr #PAGE_SHIFT - 12
- streq r5, [sp, #4]
- beq sys_mmap_pgoff
- mov r0, #-EINVAL
- ret lr
-#else
str r5, [sp, #4]
b sys_mmap_pgoff
-#endif
ENDPROC(sys_mmap2)
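
sys_mmap2 takes its final argument in units of 4096 bytes; the deleted branch rescaled it for configurations with pages larger than 4 KiB. The simplification is valid because 32-bit ARM kernels use 4 KiB pages (PAGE_SHIFT == 12), i.e. (sketch):

	/* sketch: with 4 KiB pages, a 4096-byte unit already is a page unit */
	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff_in_4k_units);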
#ifdef CONFIG_OABI_COMPAT
@@ -424,7 +455,8 @@ ENDPROC(sys_oabi_readahead)
* using the compatibility syscall entries.
*/
syscall_table_start sys_oabi_call_table
-#define COMPAT(nr, native, compat) syscall nr, compat
+#undef __SYSCALL_WITH_COMPAT
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat)
#include <calls-oabi.S>
syscall_table_end sys_oabi_call_table
diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S
index efcd9f25a14b..e24ee559af81 100644
--- a/arch/arm/kernel/entry-ftrace.S
+++ b/arch/arm/kernel/entry-ftrace.S
@@ -1,8 +1,4 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
#include <asm/assembler.h>
#include <asm/ftrace.h>
@@ -15,23 +11,8 @@
* start of every function. In mcount, apart from the function's address (in
* lr), we need to get hold of the function's caller's address.
*
- * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
- *
- * bl mcount
- *
- * These versions have the limitation that in order for the mcount routine to
- * be able to determine the function's caller's address, an APCS-style frame
- * pointer (which is set up with something like the code below) is required.
- *
- * mov ip, sp
- * push {fp, ip, lr, pc}
- * sub fp, ip, #4
- *
- * With EABI, these frame pointers are not available unless -mapcs-frame is
- * specified, and if building as Thumb-2, not even then.
- *
- * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
- * with call sites like:
+ * Newer GCCs (4.4+) solve this problem by using a version of mcount with call
+ * sites like:
*
* push {lr}
* bl __gnu_mcount_nc
@@ -41,22 +22,12 @@
* mcount can be thought of as a function called in the middle of a subroutine
* call. As such, it needs to be transparent for both the caller and the
* callee: the original lr needs to be restored when leaving mcount, and no
- * registers should be clobbered. (In the __gnu_mcount_nc implementation, we
- * clobber the ip register. This is OK because the ARM calling convention
- * allows it to be clobbered in subroutines and doesn't use it to hold
- * parameters.)
+ * registers should be clobbered.
*
- * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
- * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
- * arch/arm/kernel/ftrace.c).
+ * When using dynamic ftrace, we patch the mcount call site with an "add sp, #4"
+ * in place of the __gnu_mcount_nc call (see arch/arm/kernel/ftrace.c).
*/
-#ifndef CONFIG_OLD_MCOUNT
-#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
-#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
-#endif
-#endif
-
.macro mcount_adjust_addr rd, rn
bic \rd, \rn, #1 @ clear the Thumb bit if present
sub \rd, \rd, #MCOUNT_INSN_SIZE
@@ -64,23 +35,20 @@
.macro __mcount suffix
mcount_enter
- ldr r0, =ftrace_trace_function
- ldr r2, [r0]
- adr r0, .Lftrace_stub
+ ldr_va r2, ftrace_trace_function
+ badr r0, .Lftrace_stub
cmp r0, r2
bne 1f
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- ldr r1, =ftrace_graph_return
- ldr r2, [r1]
- cmp r0, r2
- bne ftrace_graph_caller\suffix
-
- ldr r1, =ftrace_graph_entry
- ldr r2, [r1]
- ldr r0, =ftrace_graph_entry_stub
- cmp r0, r2
- bne ftrace_graph_caller\suffix
+ ldr_va r2, ftrace_graph_return
+ cmp r0, r2
+ bne ftrace_graph_caller\suffix
+
+ ldr_va r2, ftrace_graph_entry
+ mov_l r0, ftrace_graph_entry_stub
+ cmp r0, r2
+ bne ftrace_graph_caller\suffix
#endif
mcount_exit
@@ -96,29 +64,27 @@
.macro __ftrace_regs_caller
- sub sp, sp, #8 @ space for PC and CPSR OLD_R0,
+ str lr, [sp, #-8]! @ store LR as PC and make space for CPSR/OLD_R0,
@ OLD_R0 will overwrite previous LR
- add ip, sp, #12 @ move in IP the value of SP as it was
- @ before the push {lr} of the mcount mechanism
+ ldr lr, [sp, #8] @ get previous LR
- str lr, [sp, #0] @ store LR instead of PC
+ str r0, [sp, #8] @ write r0 as OLD_R0 over previous LR
- ldr lr, [sp, #8] @ get previous LR
+ str lr, [sp, #-4]! @ store previous LR as LR
- str r0, [sp, #8] @ write r0 as OLD_R0 over previous LR
+ add lr, sp, #16 @ move in LR the value of SP as it was
+ @ before the push {lr} of the mcount mechanism
- stmdb sp!, {ip, lr}
- stmdb sp!, {r0-r11, lr}
+ push {r0-r11, ip, lr}
@ stack content at this point:
@ 0 4 48 52 56 60 64 68 72
- @ R0 | R1 | ... | LR | SP + 4 | previous LR | LR | PSR | OLD_R0 |
+ @ R0 | R1 | ... | IP | SP + 4 | previous LR | LR | PSR | OLD_R0 |
- mov r3, sp @ struct pt_regs*
+ mov r3, sp @ struct pt_regs*
- ldr r2, =function_trace_op
- ldr r2, [r2] @ pointer to the current
+ ldr_va r2, function_trace_op @ pointer to the current
@ function tracing op
ldr r1, [sp, #S_LR] @ lr of instrumented func
@@ -134,35 +100,37 @@ ftrace_regs_call:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_regs_call
ftrace_graph_regs_call:
- mov r0, r0
+ARM( mov r0, r0 )
+THUMB( nop.w )
#endif
@ pop saved regs
- ldmia sp!, {r0-r12} @ restore r0 through r12
- ldr ip, [sp, #8] @ restore PC
- ldr lr, [sp, #4] @ restore LR
- ldr sp, [sp, #0] @ restore SP
- mov pc, ip @ return
+ pop {r0-r11, ip, lr} @ restore r0 through r12
+ ldr lr, [sp], #4 @ restore LR
+ ldr pc, [sp], #12
.endm
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.macro __ftrace_graph_regs_caller
- sub r0, fp, #4 @ lr of instrumented routine (parent)
+#ifdef CONFIG_UNWINDER_FRAME_POINTER
+ sub r0, fp, #4 @ lr of instrumented routine (parent)
+#else
+ add r0, sp, #S_LR
+#endif
@ called from __ftrace_regs_caller
- ldr r1, [sp, #S_PC] @ instrumented routine (func)
+ ldr r1, [sp, #S_PC] @ instrumented routine (func)
mcount_adjust_addr r1, r1
- mov r2, fp @ frame pointer
+ mov r2, fpreg @ frame pointer
+ add r3, sp, #PT_REGS_SIZE
bl prepare_ftrace_return
@ pop registers saved in ftrace_regs_caller
- ldmia sp!, {r0-r12} @ restore r0 through r12
- ldr ip, [sp, #8] @ restore PC
- ldr lr, [sp, #4] @ restore LR
- ldr sp, [sp, #0] @ restore SP
- mov pc, ip @ return
+ pop {r0-r11, ip, lr} @ restore r0 through r12
+ ldr lr, [sp], #4 @ restore LR
+ ldr pc, [sp], #12
.endm
#endif
@@ -175,8 +143,7 @@ ftrace_graph_regs_call:
mcount_adjust_addr r0, lr @ instrumented function
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- ldr r2, =function_trace_op
- ldr r2, [r2] @ pointer to the current
+ ldr_va r2, function_trace_op @ pointer to the current
@ function tracing op
mov r3, #0 @ regs is NULL
#endif
@@ -188,14 +155,19 @@ ftrace_call\suffix:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
- mov r0, r0
+ARM( mov r0, r0 )
+THUMB( nop.w )
#endif
mcount_exit
.endm
.macro __ftrace_graph_caller
+#ifdef CONFIG_UNWINDER_FRAME_POINTER
sub r0, fp, #4 @ &lr of instrumented routine (&parent)
+#else
+ add r0, sp, #20
+#endif
#ifdef CONFIG_DYNAMIC_FTRACE
@ called from __ftrace_caller, saved in mcount_enter
ldr r1, [sp, #16] @ instrumented routine (func)
@@ -204,56 +176,12 @@ ftrace_graph_call\suffix:
@ called from __mcount, untouched in lr
mcount_adjust_addr r1, lr @ instrumented routine (func)
#endif
- mov r2, fp @ frame pointer
+ mov r2, fpreg @ frame pointer
+ add r3, sp, #24
bl prepare_ftrace_return
mcount_exit
.endm
-#ifdef CONFIG_OLD_MCOUNT
-/*
- * mcount
- */
-
-.macro mcount_enter
- stmdb sp!, {r0-r3, lr}
-.endm
-
-.macro mcount_get_lr reg
- ldr \reg, [fp, #-4]
-.endm
-
-.macro mcount_exit
- ldr lr, [fp, #-4]
- ldmia sp!, {r0-r3, pc}
-.endm
-
-ENTRY(mcount)
-#ifdef CONFIG_DYNAMIC_FTRACE
- stmdb sp!, {lr}
- ldr lr, [fp, #-4]
- ldmia sp!, {pc}
-#else
- __mcount _old
-#endif
-ENDPROC(mcount)
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-ENTRY(ftrace_caller_old)
- __ftrace_caller _old
-ENDPROC(ftrace_caller_old)
-#endif
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller_old)
- __ftrace_graph_caller
-ENDPROC(ftrace_graph_caller_old)
-#endif
-
-.purgem mcount_enter
-.purgem mcount_get_lr
-.purgem mcount_exit
-#endif
-
/*
* __gnu_mcount_nc
*/
@@ -273,16 +201,17 @@ ENDPROC(ftrace_graph_caller_old)
.endm
.macro mcount_exit
- ldmia sp!, {r0-r3, ip, lr}
- ret ip
+ ldmia sp!, {r0-r3}
+ ldr lr, [sp, #4]
+ ldr pc, [sp], #8
.endm
ENTRY(__gnu_mcount_nc)
UNWIND(.fnstart)
#ifdef CONFIG_DYNAMIC_FTRACE
- mov ip, lr
- ldmia sp!, {lr}
- ret ip
+ push {lr}
+ ldr lr, [sp, #4]
+ ldr pc, [sp], #8
#else
__mcount
#endif
@@ -327,17 +256,47 @@ ENDPROC(ftrace_graph_regs_caller)
.purgem mcount_exit
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- .globl return_to_handler
-return_to_handler:
- stmdb sp!, {r0-r3}
- mov r0, fp @ frame pointer
+ENTRY(return_to_handler)
+ mov ip, sp @ sp at exit of instrumented routine
+ sub sp, #PT_REGS_SIZE
+ str r0, [sp, #S_R0]
+ str r1, [sp, #S_R1]
+ str r2, [sp, #S_R2]
+ str r3, [sp, #S_R3]
+ str ip, [sp, #S_FP]
+ mov r0, sp
bl ftrace_return_to_handler
- mov lr, r0 @ r0 has real ret addr
- ldmia sp!, {r0-r3}
+ mov lr, r0 @ r0 has real ret addr
+ ldr r3, [sp, #S_R3]
+ ldr r2, [sp, #S_R2]
+ ldr r1, [sp, #S_R1]
+ ldr r0, [sp, #S_R0]
+ add sp, sp, #PT_REGS_SIZE @ restore stack pointer
ret lr
+ENDPROC(return_to_handler)
#endif
ENTRY(ftrace_stub)
.Lftrace_stub:
ret lr
ENDPROC(ftrace_stub)
+
+ENTRY(ftrace_stub_graph)
+ ret lr
+ENDPROC(ftrace_stub_graph)
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+ __INIT
+
+ .macro init_tramp, dst:req
+ENTRY(\dst\()_from_init)
+ ldr pc, =\dst
+ENDPROC(\dst\()_from_init)
+ .endm
+
+ init_tramp ftrace_caller
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ init_tramp ftrace_regs_caller
+#endif
+#endif
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 6391728c8f03..99411fa91350 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/init.h>
#include <linux/linkage.h>
@@ -5,6 +6,7 @@
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
+#include <asm/uaccess-asm.h>
#include <asm/v7m.h>
@ Bad Abort numbers
@@ -46,8 +48,7 @@
.macro alignment_trap, rtmp1, rtmp2, label
#ifdef CONFIG_ALIGNMENT_TRAP
mrc p15, 0, \rtmp2, c1, c0, 0
- ldr \rtmp1, \label
- ldr \rtmp1, [\rtmp1]
+ ldr_va \rtmp1, \label
teq \rtmp1, \rtmp2
mcrne p15, 0, \rtmp1, c1, c0, 0
#endif
@@ -126,7 +127,8 @@
*/
.macro v7m_exception_slow_exit ret_r0
cpsid i
- ldr lr, =EXC_RET_THREADMODE_PROCESSSTACK
+ ldr lr, =exc_ret
+ ldr lr, [lr]
@ read original r12, sp, lr, pc and xPSR
add r12, sp, #S_IP
@@ -215,9 +217,7 @@
blne trace_hardirqs_off
#endif
.endif
- ldr r1, [sp, #SVC_ADDR_LIMIT]
- uaccess_restore
- str r1, [tsk, #TI_ADDR_LIMIT]
+ uaccess_exit tsk, r0, r1
#ifndef CONFIG_THUMB2_KERNEL
@ ARM mode SVC restore
@@ -261,9 +261,7 @@
@ on the stack remains correct).
@
.macro svc_exit_via_fiq
- ldr r1, [sp, #SVC_ADDR_LIMIT]
- uaccess_restore
- str r1, [tsk, #TI_ADDR_LIMIT]
+ uaccess_exit tsk, r0, r1
#ifndef CONFIG_THUMB2_KERNEL
@ ARM mode restore
mov r0, sp
@@ -293,12 +291,28 @@
.macro restore_user_regs, fast = 0, offset = 0
+#if defined(CONFIG_CPU_32v6K) && \
+ (!defined(CONFIG_CPU_V6) || defined(CONFIG_SMP))
+#ifdef CONFIG_CPU_V6
+ALT_SMP(nop)
+ALT_UP_B(.L1_\@)
+#endif
+ @ The TLS register update is deferred until return to user space so we
+ @ can use it for other things while running in the kernel
+ mrc p15, 0, r1, c13, c0, 3 @ get current_thread_info pointer
+ ldr r1, [r1, #TI_TP_VALUE]
+ mcr p15, 0, r1, c13, c0, 3 @ set TLS register
+.L1_\@:
+#endif
+
uaccess_enable r1, isb=0
#ifndef CONFIG_THUMB2_KERNEL
@ ARM mode restore
mov r2, sp
ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr
ldr lr, [r2, #\offset + S_PC]! @ get pc
+ tst r1, #PSR_I_BIT | 0x0f
+ bne 1f
msr spsr_cxsf, r1 @ save in spsr_svc
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
@ We must avoid clrex due to Cortex-A15 erratum #830321
@@ -313,6 +327,7 @@
@ after ldm {}^
add sp, sp, #\offset + PT_REGS_SIZE
movs pc, lr @ return & move spsr_svc into cpsr
+1: bug "Returning to usermode but unexpected PSR bits set?", \@
#elif defined(CONFIG_CPU_V7M)
@ V7M restore.
@ Note that we don't need to do clrex here as clearing the local
@@ -328,6 +343,8 @@
ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
ldr lr, [sp, #\offset + S_PC] @ get pc
add sp, sp, #\offset + S_SP
+ tst r1, #PSR_I_BIT | 0x0f
+ bne 1f
msr spsr_cxsf, r1 @ save in spsr_svc
@ We must avoid clrex due to Cortex-A15 erratum #830321
@@ -340,6 +357,7 @@
.endif
add sp, sp, #PT_REGS_SIZE - S_SP
movs pc, lr @ return & move spsr_svc into cpsr
+1: bug "Returning to usermode but unexpected PSR bits set?", \@
#endif /* !CONFIG_THUMB2_KERNEL */
.endm
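
The new 'tst r1, #PSR_I_BIT | 0x0f' ahead of each 'movs pc, lr' sanity-checks the saved PSR: a return to user space needs IRQs unmasked and the low mode nibble zero (USR mode). In C terms (sketch):

	/* sketch: trip the bug handler rather than 'return' with a bad PSR */
	if (regs->ARM_cpsr & (PSR_I_BIT | 0x0f))
		BUG();	/* IRQs masked, or privileged mode bits set */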
@@ -348,26 +366,51 @@
* between user and kernel mode.
*/
.macro ct_user_exit, save = 1
-#ifdef CONFIG_CONTEXT_TRACKING
+#ifdef CONFIG_CONTEXT_TRACKING_USER
.if \save
stmdb sp!, {r0-r3, ip, lr}
- bl context_tracking_user_exit
+ bl user_exit_callable
ldmia sp!, {r0-r3, ip, lr}
.else
- bl context_tracking_user_exit
+ bl user_exit_callable
.endif
#endif
.endm
.macro ct_user_enter, save = 1
-#ifdef CONFIG_CONTEXT_TRACKING
+#ifdef CONFIG_CONTEXT_TRACKING_USER
.if \save
stmdb sp!, {r0-r3, ip, lr}
- bl context_tracking_user_enter
+ bl user_enter_callable
ldmia sp!, {r0-r3, ip, lr}
.else
- bl context_tracking_user_enter
+ bl user_enter_callable
+ .endif
+#endif
+ .endm
+
+ .macro invoke_syscall, table, nr, tmp, ret, reload=0
+#ifdef CONFIG_CPU_SPECTRE
+ mov \tmp, \nr
+ cmp \tmp, #NR_syscalls @ check upper syscall limit
+ movcs \tmp, #0
+ csdb
+ badr lr, \ret @ return address
+ .if \reload
+ add r1, sp, #S_R0 + S_OFF @ pointer to regs
+ ldmiacc r1, {r0 - r6} @ reload r0-r6
+ stmiacc sp, {r4, r5} @ update stack arguments
+ .endif
+ ldrcc pc, [\table, \tmp, lsl #2] @ call sys_* routine
+#else
+ cmp \nr, #NR_syscalls @ check upper syscall limit
+ badr lr, \ret @ return address
+ .if \reload
+ add r1, sp, #S_R0 + S_OFF @ pointer to regs
+ ldmiacc r1, {r0 - r6} @ reload r0-r6
+ stmiacc sp, {r4, r5} @ update stack arguments
.endif
+ ldrcc pc, [\table, \nr, lsl #2] @ call sys_* routine
#endif
.endm
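
A C rendering of the CONFIG_CPU_SPECTRE arm of the macro above (a sketch; csdb() stands in for the CSDB barrier instruction): the clamp only bounds the index that a mispredicted comparison could consume, while the architectural call still happens only for in-range numbers.

	unsigned int idx = nr;
	if (idx >= NR_syscalls)
		idx = 0;		/* reached only under misprediction */
	csdb();				/* consumption-of-speculative-data barrier */
	if (nr < NR_syscalls)		/* the real bounds check */
		table[idx]();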
@@ -385,3 +428,40 @@ scno .req r7 @ syscall number
tbl .req r8 @ syscall table pointer
why .req r8 @ Linux syscall (!= 0)
tsk .req r9 @ current thread_info
+
+ .macro do_overflow_check, frame_size:req
+#ifdef CONFIG_VMAP_STACK
+ @
+ @ Test whether the SP has overflowed. Task and IRQ stacks are aligned
+ @ so that SP & BIT(THREAD_SIZE_ORDER + PAGE_SHIFT) should always be
+ @ zero.
+ @
+ARM( tst sp, #1 << (THREAD_SIZE_ORDER + PAGE_SHIFT) )
+THUMB( tst r1, #1 << (THREAD_SIZE_ORDER + PAGE_SHIFT) )
+THUMB( it ne )
+ bne .Lstack_overflow_check\@
+
+ .pushsection .text
+.Lstack_overflow_check\@:
+ @
+ @ The stack pointer is not pointing to a valid vmap'ed stack, but it
+ @ may be pointing into the linear map instead, which may happen if we
+ @ are already running from the overflow stack. We cannot detect overflow
+ @ in such cases so just carry on.
+ @
+ str ip, [r0, #12] @ Stash IP on the mode stack
+ ldr_va ip, high_memory @ Start of VMALLOC space
+ARM( cmp sp, ip ) @ SP in vmalloc space?
+THUMB( cmp r1, ip )
+THUMB( itt lo )
+ ldrlo ip, [r0, #12] @ Restore IP
+ blo .Lout\@ @ Carry on
+
+THUMB( sub r1, sp, r1 ) @ Restore original R1
+THUMB( sub sp, r1 ) @ Restore original SP
+ add sp, sp, #\frame_size @ Undo svc_entry's SP change
+ b __bad_stack @ Handle VMAP stack overflow
+ .popsection
+.Lout\@:
+#endif
+ .endm
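
A C sketch of the test above, assuming, as the comment states, that task and IRQ stacks are aligned such that a valid SP always has the THREAD_SIZE bit clear:

	static bool sp_overflowed(unsigned long sp)
	{
		return sp & (1UL << (THREAD_SIZE_ORDER + PAGE_SHIFT));
	}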
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
index abcf47848525..52bacf07ba16 100644
--- a/arch/arm/kernel/entry-v7m.S
+++ b/arch/arm/kernel/entry-v7m.S
@@ -1,15 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/kernel/entry-v7m.S
*
* Copyright (C) 2008 ARM Ltd.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Low-level vector interface routines for the ARMv7-M architecture
*/
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/glue.h>
#include <asm/thread_notify.h>
#include <asm/v7m.h>
@@ -26,7 +23,7 @@ __invalid_entry:
adr r0, strerr
mrs r1, ipsr
mov r2, lr
- bl printk
+ bl _printk
#endif
mov r0, sp
bl show_regs
@@ -42,16 +39,25 @@ __irq_entry:
@
@ Invoke the IRQ handler
@
- mrs r0, ipsr
- ldr r1, =V7M_xPSR_EXCEPTIONNO
- and r0, r1
- sub r0, #16
- mov r1, sp
- stmdb sp!, {lr}
- @ routine called with r0 = irq number, r1 = struct pt_regs *
- bl nvic_handle_irq
-
- pop {lr}
+ mov r0, sp
+ ldr_this_cpu sp, irq_stack_ptr, r1, r2
+
+ @
+ @ If we took the interrupt while running in the kernel, we may already
+ @ be using the IRQ stack, so revert to the original value in that case.
+ @
+ subs r2, sp, r0 @ SP above bottom of IRQ stack?
+ rsbscs r2, r2, #THREAD_SIZE @ ... and below the top?
+ movcs sp, r0
+
+ push {r0, lr} @ preserve LR and original SP
+
+ @ routine called with r0 = struct pt_regs *
+ bl generic_handle_arch_irq
+
+ pop {r0, lr}
+ mov sp, r0
+
@
@ Check for any pending work if returning to user
@
@@ -62,7 +68,7 @@ __irq_entry:
get_thread_info tsk
ldr r2, [tsk, #TI_FLAGS]
- tst r2, #_TIF_WORK_MASK
+ movs r2, r2, lsl #16
beq 2f @ no work pending
mov r0, #V7M_SCB_ICSR_PENDSVSET
str r0, [r1, V7M_SCB_ICSR] @ raise PendSV
@@ -104,15 +110,17 @@ ENTRY(__switch_to)
str sp, [ip], #4
str lr, [ip], #4
mov r5, r0
+ mov r6, r2 @ Preserve 'next'
add r4, r2, #TI_CPU_SAVE
ldr r0, =thread_notify_head
mov r1, #THREAD_NOTIFY_SWITCH
bl atomic_notifier_call_chain
- mov ip, r4
mov r0, r5
- ldmia ip!, {r4 - r11} @ Load all regs saved previously
- ldr sp, [ip]
- ldr pc, [ip, #4]!
+ mov r1, r6
+ ldmia r4, {r4 - r12, lr} @ Load all regs saved previously
+ set_current r1, r2
+ mov sp, ip
+ bx lr
.fnend
ENDPROC(__switch_to)
@@ -146,3 +154,7 @@ ENTRY(vector_table)
.rept CONFIG_CPU_V7M_NUM_IRQ
.long __irq_entry @ External Interrupts
.endr
+ .align 2
+ .globl exc_ret
+exc_ret:
+ .space 4
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 059c3da0fee3..d2c8e5313539 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/arm/kernel/fiq.c
*
@@ -44,6 +45,7 @@
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/fiq.h>
+#include <asm/mach/irq.h>
#include <asm/irq.h>
#include <asm/traps.h>
@@ -97,8 +99,8 @@ void set_fiq_handler(void *start, unsigned int length)
memcpy(base + offset, start, length);
if (!cache_is_vipt_nonaliasing())
- flush_icache_range((unsigned long)base + offset, offset +
- length);
+ flush_icache_range((unsigned long)base + offset,
+ (unsigned long)base + offset + length);
flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
}
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 5617932a83df..845acf9ce21e 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -22,11 +22,24 @@
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/set_memory.h>
+#include <asm/stacktrace.h>
+#include <asm/text-patching.h>
+/*
+ * The compiler emitted profiling hook consists of
+ *
+ * PUSH {LR}
+ * BL __gnu_mcount_nc
+ *
+ * To turn this combined sequence into a NOP, we need to restore the value of
+ * SP before the PUSH. Let's use an ADD rather than a POP into LR, as LR is not
+ * modified anyway, and reloading LR from memory is highly likely to be less
+ * efficient.
+ */
#ifdef CONFIG_THUMB2_KERNEL
-#define NOP 0xf85deb04 /* pop.w {lr} */
+#define NOP 0xf10d0d04 /* add.w sp, sp, #4 */
#else
-#define NOP 0xe8bd4000 /* pop {lr} */
+#define NOP 0xe28dd004 /* add sp, sp, #4 */
#endif
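
At a patch site, the transformation encoded by the NOP value above looks like this (illustration):

	push	{lr}				push	{lr}
	bl	__gnu_mcount_nc		==>	add	sp, sp, #4

The ADD discards the stack slot created by the compiler-emitted PUSH, leaving SP and LR exactly as they were before the hook.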
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -35,9 +48,7 @@ static int __ftrace_modify_code(void *data)
{
int *command = data;
- set_kernel_text_rw();
ftrace_modify_all_code(*command);
- set_kernel_text_ro();
return 0;
}
@@ -47,58 +58,41 @@ void arch_ftrace_update_code(int command)
stop_machine(__ftrace_modify_code, &command, NULL);
}
-#ifdef CONFIG_OLD_MCOUNT
-#define OLD_MCOUNT_ADDR ((unsigned long) mcount)
-#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
-
-#define OLD_NOP 0xe1a00000 /* mov r0, r0 */
-
-static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
-{
- return rec->arch.old_mcount ? OLD_NOP : NOP;
-}
-
-static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
-{
- if (!rec->arch.old_mcount)
- return addr;
-
- if (addr == MCOUNT_ADDR)
- addr = OLD_MCOUNT_ADDR;
- else if (addr == FTRACE_ADDR)
- addr = OLD_FTRACE_ADDR;
-
- return addr;
-}
-#else
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
return NOP;
}
-static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
+void ftrace_caller_from_init(void);
+void ftrace_regs_caller_from_init(void);
+
+static unsigned long __ref adjust_address(struct dyn_ftrace *rec,
+ unsigned long addr)
{
- return addr;
+ if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE) ||
+ system_state >= SYSTEM_FREEING_INITMEM ||
+ likely(!is_kernel_inittext(rec->ip)))
+ return addr;
+ if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) ||
+ addr == (unsigned long)&ftrace_caller)
+ return (unsigned long)&ftrace_caller_from_init;
+ return (unsigned long)&ftrace_regs_caller_from_init;
}
-#endif
-int ftrace_arch_code_modify_prepare(void)
+void ftrace_arch_code_modify_prepare(void)
{
- set_all_modules_text_rw();
- return 0;
}
-int ftrace_arch_code_modify_post_process(void)
+void ftrace_arch_code_modify_post_process(void)
{
- set_all_modules_text_ro();
/* Make sure any TLB misses during machine stop are cleared. */
flush_tlb_all();
- return 0;
}
-static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
+static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr,
+ bool warn)
{
- return arm_gen_branch_link(pc, addr);
+ return arm_gen_branch_link(pc, addr, warn);
}
static int ftrace_modify_code(unsigned long pc, unsigned long old,
@@ -106,26 +100,21 @@ static int ftrace_modify_code(unsigned long pc, unsigned long old,
{
unsigned long replaced;
- if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
+ if (IS_ENABLED(CONFIG_THUMB2_KERNEL))
old = __opcode_to_mem_thumb32(old);
- new = __opcode_to_mem_thumb32(new);
- } else {
+ else
old = __opcode_to_mem_arm(old);
- new = __opcode_to_mem_arm(new);
- }
if (validate) {
- if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
+ if (copy_from_kernel_nofault(&replaced, (void *)pc,
+ MCOUNT_INSN_SIZE))
return -EFAULT;
if (replaced != old)
return -EINVAL;
}
- if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE))
- return -EPERM;
-
- flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
+ __patch_text((void *)pc, new);
return 0;
}
@@ -137,23 +126,14 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
int ret;
pc = (unsigned long)&ftrace_call;
- new = ftrace_call_replace(pc, (unsigned long)func);
+ new = ftrace_call_replace(pc, (unsigned long)func, true);
ret = ftrace_modify_code(pc, 0, new, false);
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
if (!ret) {
pc = (unsigned long)&ftrace_regs_call;
- new = ftrace_call_replace(pc, (unsigned long)func);
-
- ret = ftrace_modify_code(pc, 0, new, false);
- }
-#endif
-
-#ifdef CONFIG_OLD_MCOUNT
- if (!ret) {
- pc = (unsigned long)&ftrace_call_old;
- new = ftrace_call_replace(pc, (unsigned long)func);
+ new = ftrace_call_replace(pc, (unsigned long)func, true);
ret = ftrace_modify_code(pc, 0, new, false);
}
@@ -166,10 +146,22 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long new, old;
unsigned long ip = rec->ip;
+ unsigned long aaddr = adjust_address(rec, addr);
+ struct module *mod = NULL;
+
+#ifdef CONFIG_ARM_MODULE_PLTS
+ mod = rec->arch.mod;
+#endif
old = ftrace_nop_replace(rec);
- new = ftrace_call_replace(ip, adjust_address(rec, addr));
+ new = ftrace_call_replace(ip, aaddr, !mod);
+#ifdef CONFIG_ARM_MODULE_PLTS
+ if (!new && mod) {
+ aaddr = get_module_plt(mod, ip, aaddr);
+ new = ftrace_call_replace(ip, aaddr, true);
+ }
+#endif
return ftrace_modify_code(rec->ip, old, new, true);
}
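
The PLT fallback above hinges on arm_gen_branch_link() producing no encoding (zero) when the target lies outside the BL instruction's reach. As a rough illustration of why, here is a user-space sketch of the encoding; gen_arm_bl() is an invented name, and the constants follow the usual ARM BL format (a signed 24-bit word offset relative to pc + 8, i.e. a +/-32 MiB range):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for arm_gen_branch_link(): returns 0 when the
 * target is out of BL range, which is what forces the PLT path above. */
static uint32_t gen_arm_bl(uint32_t pc, uint32_t addr)
{
	int32_t offset = (int32_t)(addr - pc - 8);

	if (offset < -0x02000000 || offset >= 0x02000000)
		return 0;			/* out of range */
	return 0xeb000000u | (((uint32_t)offset >> 2) & 0x00ffffffu);
}

int main(void)
{
	printf("0x%08x\n", gen_arm_bl(0x8000, 0x9000));	/* bl +0x1000 */
	return 0;
}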
@@ -182,9 +174,9 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long new, old;
unsigned long ip = rec->ip;
- old = ftrace_call_replace(ip, adjust_address(rec, old_addr));
+ old = ftrace_call_replace(ip, adjust_address(rec, old_addr), true);
- new = ftrace_call_replace(ip, adjust_address(rec, addr));
+ new = ftrace_call_replace(ip, adjust_address(rec, addr), true);
return ftrace_modify_code(rec->ip, old, new, true);
}
@@ -194,64 +186,89 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
+ unsigned long aaddr = adjust_address(rec, addr);
unsigned long ip = rec->ip;
unsigned long old;
unsigned long new;
int ret;
- old = ftrace_call_replace(ip, adjust_address(rec, addr));
- new = ftrace_nop_replace(rec);
- ret = ftrace_modify_code(ip, old, new, true);
-
-#ifdef CONFIG_OLD_MCOUNT
- if (ret == -EINVAL && addr == MCOUNT_ADDR) {
- rec->arch.old_mcount = true;
+#ifdef CONFIG_ARM_MODULE_PLTS
+ /* mod is only supplied during module loading */
+ if (!mod)
+ mod = rec->arch.mod;
+ else
+ rec->arch.mod = mod;
+#endif
- old = ftrace_call_replace(ip, adjust_address(rec, addr));
- new = ftrace_nop_replace(rec);
- ret = ftrace_modify_code(ip, old, new, true);
+ old = ftrace_call_replace(ip, aaddr,
+ !IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || !mod);
+#ifdef CONFIG_ARM_MODULE_PLTS
+ if (!old && mod) {
+ aaddr = get_module_plt(mod, ip, aaddr);
+ old = ftrace_call_replace(ip, aaddr, true);
}
#endif
- return ret;
-}
+ new = ftrace_nop_replace(rec);
+ /*
+	 * Locations in .init.text may call __gnu_mcount_nc via a
+	 * linker-emitted veneer if they are too far away from its
+	 * implementation, and so validation may fail spuriously in such
+	 * cases. Let's work around this by omitting those from validation.
+ */
+ ret = ftrace_modify_code(ip, old, new, !is_kernel_inittext(ip));
-int __init ftrace_dyn_arch_init(void)
-{
- return 0;
+ return ret;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+asmlinkage
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
- unsigned long frame_pointer)
+ unsigned long frame_pointer,
+ unsigned long stack_pointer)
{
unsigned long return_hooker = (unsigned long) &return_to_handler;
- struct ftrace_graph_ent trace;
unsigned long old;
- int err;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
+err_out:
return;
+ if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER)) {
+ /*
+ * Usually, the stack frames are contiguous in memory but cases
+ * have been observed where the next stack frame does not live
+ * at 'frame_pointer + 4' as this code used to assume.
+ *
+ * Instead, dereference the field in the stack frame that
+ * stores the SP of the calling frame: to avoid unbounded
+ * recursion, this cannot involve any ftrace instrumented
+ * functions, so use the __get_kernel_nofault() primitive
+ * directly.
+ */
+ __get_kernel_nofault(&frame_pointer,
+ (unsigned long *)(frame_pointer - 8),
+ unsigned long, err_out);
+ } else {
+ struct stackframe frame = {
+ .fp = frame_pointer,
+ .sp = stack_pointer,
+ .lr = self_addr,
+ .pc = self_addr,
+ };
+ if (unwind_frame(&frame) < 0)
+ return;
+ if (frame.lr != self_addr)
+ parent = frame.lr_addr;
+ frame_pointer = frame.sp;
+ }
+
old = *parent;
*parent = return_hooker;
- trace.func = self_addr;
- trace.depth = current->curr_ret_stack + 1;
-
- /* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace)) {
- *parent = old;
- return;
- }
-
- err = ftrace_push_return_trace(old, self_addr, &trace.depth,
- frame_pointer, NULL);
- if (err == -EBUSY) {
+ if (function_graph_enter(old, self_addr, frame_pointer, NULL))
*parent = old;
- return;
- }
}
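
Once the correct parent slot has been located, the graph tracer's trick is simply to swap the saved return address for a trampoline. Below is a toy user-space analogue of that swap, with invented names (the real return_to_handler additionally restores the original address after recording the function exit):

#include <stdio.h>

static unsigned long saved_parent;

static void trampoline(void)
{
	printf("intercepted return; original target was %#lx\n", saved_parent);
}

/* Stand-in for the "*parent = return_hooker" assignment above. */
static void hook_return_slot(unsigned long *parent)
{
	saved_parent = *parent;
	*parent = (unsigned long)trampoline;
}

int main(void)
{
	unsigned long slot = 0x8123;	/* pretend this is a saved LR */

	hook_return_slot(&slot);
	((void (*)(void))slot)();	/* the "return" lands in the hook */
	return 0;
}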
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -267,7 +284,7 @@ static int __ftrace_modify_caller(unsigned long *callsite,
unsigned long caller_fn = (unsigned long) func;
unsigned long pc = (unsigned long) callsite;
unsigned long branch = arm_gen_branch(pc, caller_fn);
- unsigned long nop = 0xe1a00000; /* mov r0, r0 */
+ unsigned long nop = arm_gen_nop();
unsigned long old = enable ? nop : branch;
unsigned long new = enable ? branch : nop;
@@ -290,13 +307,6 @@ static int ftrace_modify_graph_caller(bool enable)
#endif
-#ifdef CONFIG_OLD_MCOUNT
- if (!ret)
- ret = __ftrace_modify_caller(&ftrace_graph_call_old,
- ftrace_graph_caller_old,
- enable);
-#endif
-
return ret;
}
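
Replacing the hard-coded 0xe1a00000 with arm_gen_nop() matters because "mov r0, r0" is a NOP encoding only in ARM state; a Thumb-2 kernel needs the wide NOP instead. For reference (encodings as commonly documented, shown only to motivate the helper):

/* ARM state: "mov r0, r0" serves as a NOP */
#define ARM_NOP_INSN	0xe1a00000u
/* Thumb-2 kernels use the 32-bit "nop.w" encoding */
#define T32_NOP_INSN	0xf3af8000u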
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 8733012d231f..42cae73fcc19 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/kernel/head-common.S
*
* Copyright (C) 1994-2002 Russell King
* Copyright (c) 2003 ARM Limited
* All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <asm/assembler.h>
@@ -72,56 +68,89 @@ ENDPROC(__vet_atags)
* The following fragment of code is executed with the MMU on in MMU mode,
* and uses absolute addresses; this is not position independent.
*
- * r0 = cp#15 control register
+ * r0 = cp#15 control register (exc_ret for M-class)
* r1 = machine ID
* r2 = atags/dtb pointer
* r9 = processor ID
*/
__INIT
__mmap_switched:
- adr r3, __mmap_switched_data
-
- ldmia r3!, {r4, r5, r6, r7}
- cmp r4, r5 @ Copy data segment if needed
-1: cmpne r5, r6
- ldrne fp, [r4], #4
- strne fp, [r5], #4
- bne 1b
-
- mov fp, #0 @ Clear BSS (and zero fp)
-1: cmp r6, r7
- strcc fp, [r6],#4
- bcc 1b
-
- ARM( ldmia r3, {r4, r5, r6, r7, sp})
- THUMB( ldmia r3, {r4, r5, r6, r7} )
- THUMB( ldr sp, [r3, #16] )
- str r9, [r4] @ Save processor ID
- str r1, [r5] @ Save machine type
- str r2, [r6] @ Save atags pointer
- cmp r7, #0
- strne r0, [r7] @ Save control register values
+
+ mov r7, r1
+ mov r8, r2
+ mov r10, r0
+
+ adr r4, __mmap_switched_data
+ mov fp, #0
+
+#if defined(CONFIG_XIP_DEFLATED_DATA)
+ ARM( ldr sp, [r4], #4 )
+ THUMB( ldr sp, [r4] )
+ THUMB( add r4, #4 )
+ bl __inflate_kernel_data @ decompress .data to RAM
+ teq r0, #0
+ bne __error
+#elif defined(CONFIG_XIP_KERNEL)
+ ARM( ldmia r4!, {r0, r1, r2, sp} )
+ THUMB( ldmia r4!, {r0, r1, r2, r3} )
+ THUMB( mov sp, r3 )
+ sub r2, r2, r1
+ bl __memcpy @ copy .data to RAM
+#endif
+
+ ARM( ldmia r4!, {r0, r1, sp} )
+ THUMB( ldmia r4!, {r0, r1, r3} )
+ THUMB( mov sp, r3 )
+ sub r2, r1, r0
+ mov r1, #0
+ bl __memset @ clear .bss
+
+ adr_l r0, init_task @ get swapper task_struct
+ set_current r0, r1
+
+ ldmia r4, {r0, r1, r2, r3}
+ str r9, [r0] @ Save processor ID
+ str r7, [r1] @ Save machine type
+ str r8, [r2] @ Save atags pointer
+ cmp r3, #0
+ strne r10, [r3] @ Save control register values
+#ifdef CONFIG_KASAN
+ bl kasan_early_init
+#endif
+ mov lr, #0
b start_kernel
ENDPROC(__mmap_switched)
.align 2
.type __mmap_switched_data, %object
__mmap_switched_data:
- .long __data_loc @ r4
- .long _sdata @ r5
- .long __bss_start @ r6
- .long _end @ r7
- .long processor_id @ r4
- .long __machine_arch_type @ r5
- .long __atags_pointer @ r6
+#ifdef CONFIG_XIP_KERNEL
+#ifndef CONFIG_XIP_DEFLATED_DATA
+ .long _sdata @ r0
+ .long __data_loc @ r1
+ .long _edata_loc @ r2
+#endif
+ .long __bss_stop @ sp (temporary stack in .bss)
+#endif
+
+ .long __bss_start @ r0
+ .long __bss_stop @ r1
+ .long init_thread_union + THREAD_START_SP @ sp
+
+ .long processor_id @ r0
+ .long __machine_arch_type @ r1
+ .long __atags_pointer @ r2
#ifdef CONFIG_CPU_CP15
- .long cr_alignment @ r7
+ .long cr_alignment @ r3
#else
- .long 0 @ r7
+M_CLASS(.long exc_ret) @ r3
+AR_CLASS(.long 0) @ r3
#endif
- .long init_thread_union + THREAD_START_SP @ sp
.size __mmap_switched_data, . - __mmap_switched_data
+ __FINIT
+ .text
+
/*
* This provides a C-API version of __lookup_processor_type
*/
@@ -133,9 +162,6 @@ ENTRY(lookup_processor_type)
ldmfd sp!, {r4 - r6, r9, pc}
ENDPROC(lookup_processor_type)
- __FINIT
- .text
-
/*
* Read processor ID register (CP#15, CR0), and look up in the linker-built
* supported processor list. Note that we can't use the absolute addresses
@@ -150,11 +176,12 @@ ENDPROC(lookup_processor_type)
* r9 = cpuid (preserved)
*/
__lookup_processor_type:
- adr r3, __lookup_processor_type_data
- ldmia r3, {r4 - r6}
- sub r3, r3, r4 @ get offset between virt&phys
- add r5, r5, r3 @ convert virt addresses to
- add r6, r6, r3 @ physical address space
+ /*
+ * Look in <asm/procinfo.h> for information about the __proc_info
+ * structure.
+ */
+ adr_l r5, __proc_info_begin
+ adr_l r6, __proc_info_end
1: ldmia r5, {r3, r4} @ value, mask
and r4, r4, r9 @ mask wanted bits
teq r3, r4
@@ -166,17 +193,6 @@ __lookup_processor_type:
2: ret lr
ENDPROC(__lookup_processor_type)
-/*
- * Look in <asm/procinfo.h> for information about the __proc_info structure.
- */
- .align 2
- .type __lookup_processor_type_data, %object
-__lookup_processor_type_data:
- .long .
- .long __proc_info_begin
- .long __proc_info_end
- .size __lookup_processor_type_data, . - __lookup_processor_type_data
-
__error_lpae:
#ifdef CONFIG_DEBUG_LL
adr r0, str_lpae
diff --git a/arch/arm/kernel/head-inflate-data.c b/arch/arm/kernel/head-inflate-data.c
new file mode 100644
index 000000000000..225c0699a12c
--- /dev/null
+++ b/arch/arm/kernel/head-inflate-data.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * XIP kernel .data segment decompressor
+ *
+ * Created by: Nicolas Pitre, August 2017
+ * Copyright: (C) 2017 Linaro Limited
+ */
+
+#include <linux/init.h>
+#include <linux/zutil.h>
+#include "head.h"
+
+/* for struct inflate_state */
+#include "../../../lib/zlib_inflate/inftrees.h"
+#include "../../../lib/zlib_inflate/inflate.h"
+#include "../../../lib/zlib_inflate/infutil.h"
+
+/*
+ * This code is called very early during the boot process to decompress
+ * the .data segment stored compressed in ROM. Therefore none of the global
+ * variables are valid yet, hence no kernel services such as memory
+ * allocation are available. Everything must be allocated on the stack and
+ * we must avoid any global data access. We use a temporary stack located
+ * in the .bss area. The linker script makes sure the .bss is big enough
+ * to hold our stack frame plus some room for called functions.
+ *
+ * We mimic the code in lib/decompress_inflate.c to use the smallest work
+ * area possible. And because everything is statically allocated on the
+ * stack, there is no need to clean up before returning.
+ */
+
+int __init __inflate_kernel_data(void)
+{
+ struct z_stream_s stream, *strm = &stream;
+ struct inflate_state state;
+ char *in = __data_loc;
+ int rc;
+
+ /* Check and skip gzip header (assume no filename) */
+ if (in[0] != 0x1f || in[1] != 0x8b || in[2] != 0x08 || in[3] & ~3)
+ return -1;
+ in += 10;
+
+ strm->workspace = &state;
+ strm->next_in = in;
+ strm->avail_in = _edata_loc - __data_loc; /* upper bound */
+ strm->next_out = _sdata;
+ strm->avail_out = _edata_loc - __data_loc;
+ zlib_inflateInit2(strm, -MAX_WBITS);
+ WS(strm)->inflate_state.wsize = 0;
+ WS(strm)->inflate_state.window = NULL;
+ rc = zlib_inflate(strm, Z_FINISH);
+ if (rc == Z_OK || rc == Z_STREAM_END)
+ rc = strm->avail_out; /* should be 0 */
+ return rc;
+}
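
The same skip-the-gzip-header-then-raw-inflate pattern can be reproduced with ordinary user-space zlib, which is handy for sanity-checking a compressed .data payload extracted from a ROM image. A sketch under that assumption (the function name and buffer handling are illustrative):

#include <string.h>
#include <zlib.h>

static int inflate_gzip_payload(const unsigned char *in, size_t in_len,
				unsigned char *out, size_t out_len)
{
	z_stream strm;
	int rc;

	/* Header check as above: gzip magic + deflate method (FLG
	 * handling omitted in this sketch) */
	if (in_len < 10 || in[0] != 0x1f || in[1] != 0x8b || in[2] != 0x08)
		return -1;

	memset(&strm, 0, sizeof(strm));
	strm.next_in = (unsigned char *)in + 10;	/* skip gzip header */
	strm.avail_in = in_len - 10;
	strm.next_out = out;
	strm.avail_out = out_len;

	if (inflateInit2(&strm, -MAX_WBITS) != Z_OK)	/* raw DEFLATE */
		return -1;
	rc = inflate(&strm, Z_FINISH);
	inflateEnd(&strm);
	return rc == Z_STREAM_END ? (int)strm.total_out : -1;
}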
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 2e21e08de747..b9d6818f1ee1 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -1,28 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/kernel/head-nommu.S
*
* Copyright (C) 1994-2002 Russell King
* Copyright (C) 2003-2006 Hyok S. Choi
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Common kernel startup code (non-paged MM)
- *
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/errno.h>
#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/cp15.h>
#include <asm/thread_info.h>
#include <asm/v7m.h>
#include <asm/mpu.h>
-#include <asm/page.h>
/*
* Kernel startup entry point.
@@ -52,7 +48,11 @@ ENTRY(stext)
THUMB(1: )
#endif
- setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
+#ifdef CONFIG_ARM_VIRT_EXT
+ bl __hyp_stub_install
+#endif
+ @ ensure svc mode and all interrupts masked
+ safe_svcmode_maskall r9
@ and irqs disabled
#if defined(CONFIG_CPU_CP15)
mrc p15, 0, r9, c0, c0 @ get processor id
@@ -67,14 +67,6 @@ ENTRY(stext)
beq __error_p @ yes, error 'p'
#ifdef CONFIG_ARM_MPU
- /* Calculate the size of a region covering just the kernel */
- ldr r5, =PLAT_PHYS_OFFSET @ Region start: PHYS_OFFSET
- ldr r6, =(_end) @ Cover whole kernel
- sub r6, r6, r5 @ Minimum size of region to map
- clz r6, r6 @ Region size must be 2^N...
- rsb r6, r6, #31 @ ...so round up region size
- lsl r6, r6, #MPU_RSR_SZ @ Put size in right field
- orr r6, r6, #(1 << MPU_RSR_EN) @ Set region enabled bit
bl __setup_mpu
#endif
@@ -82,8 +74,8 @@ ENTRY(stext)
ldr r12, [r10, #PROCINFO_INITFUNC]
add r12, r12, r10
ret r12
-1: bl __after_proc_init
- b __mmap_switched
+1: ldr lr, =__mmap_switched
+ b __after_proc_init
ENDPROC(stext)
#ifdef CONFIG_SMP
@@ -96,7 +88,11 @@ ENTRY(secondary_startup)
* the processor type - there is no need to check the machine type
* as it has already been validated by the primary processor.
*/
- setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
+#ifdef CONFIG_ARM_VIRT_EXT
+ bl __hyp_stub_install_secondary
+#endif
+ safe_svcmode_maskall r9
+
#ifndef CONFIG_CPU_CP15
ldr r9, =CONFIG_PROCESSOR_ID
#else
@@ -109,9 +105,7 @@ ENTRY(secondary_startup)
ldr r7, __secondary_data
#ifdef CONFIG_ARM_MPU
- /* Use MPU region info supplied by __cpu_up */
- ldr r6, [r7] @ get secondary_data.mpu_szr
- bl __setup_mpu @ Initialize the MPU
+ bl __secondary_setup_mpu @ Initialize the MPU
#endif
badr lr, 1f @ return (PIC) address
@@ -119,7 +113,9 @@ ENTRY(secondary_startup)
add r12, r12, r10
ret r12
1: bl __after_proc_init
+ ldr r7, __secondary_data @ reload r7
ldr sp, [r7, #12] @ set up the stack pointer
+ ldr r0, [r7, #16] @ set up task pointer
mov fp, #0
b secondary_start_kernel
ENDPROC(secondary_startup)
@@ -132,12 +128,45 @@ __secondary_data:
/*
* Set the Control Register and Read the process ID.
*/
+ .text
__after_proc_init:
+M_CLASS(movw r12, #:lower16:BASEADDR_V7M_SCB)
+M_CLASS(movt r12, #:upper16:BASEADDR_V7M_SCB)
+#ifdef CONFIG_ARM_MPU
+M_CLASS(ldr r3, [r12, 0x50])
+AR_CLASS(mrc p15, 0, r3, c0, c1, 4) @ Read ID_MMFR0
+ and r3, r3, #(MMFR0_PMSA) @ PMSA field
+ teq r3, #(MMFR0_PMSAv7) @ PMSA v7
+ beq 1f
+ teq r3, #(MMFR0_PMSAv8) @ PMSA v8
+ /*
+ * Memory region attributes for PMSAv8:
+ *
+ * n = AttrIndx[2:0]
+ * n MAIR
+ * DEVICE_nGnRnE 000 00000000
+ * NORMAL 001 11111111
+ */
+ ldreq r3, =PMSAv8_MAIR(0x00, PMSAv8_RGN_DEVICE_nGnRnE) | \
+ PMSAv8_MAIR(0xff, PMSAv8_RGN_NORMAL)
+AR_CLASS(mcreq p15, 0, r3, c10, c2, 0) @ MAIR 0
+M_CLASS(streq r3, [r12, #PMSAv8_MAIR0])
+ moveq r3, #0
+AR_CLASS(mcreq p15, 0, r3, c10, c2, 1) @ MAIR 1
+M_CLASS(streq r3, [r12, #PMSAv8_MAIR1])
+
+1:
+#endif
#ifdef CONFIG_CPU_CP15
/*
* CP15 system control register value returned in r0 from
* the CPU init function.
*/
+
+#ifdef CONFIG_ARM_MPU
+ biceq r0, r0, #CR_BR @ Disable the 'default mem-map'
+ orreq r0, r0, #CR_M @ Set SCTRL.M (MPU on)
+#endif
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
orr r0, r0, #CR_A
#else
@@ -153,7 +182,15 @@ __after_proc_init:
bic r0, r0, #CR_I
#endif
mcr p15, 0, r0, c1, c0, 0 @ write control reg
+ instr_sync
#elif defined (CONFIG_CPU_V7M)
+#ifdef CONFIG_ARM_MPU
+ ldreq r3, [r12, MPU_CTRL]
+ biceq r3, #MPU_CTRL_PRIVDEFENA
+ orreq r3, #MPU_CTRL_ENABLE
+ streq r3, [r12, MPU_CTRL]
+ isb
+#endif
/* For V7M systems we want to modify the CCR similarly to the SCTLR */
#ifdef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #V7M_SCB_CCR_DC
@@ -164,9 +201,9 @@ __after_proc_init:
#ifdef CONFIG_CPU_ICACHE_DISABLE
bic r0, r0, #V7M_SCB_CCR_IC
#endif
- movw r3, #:lower16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
- movt r3, #:upper16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
- str r0, [r3]
+ str r0, [r12, V7M_SCB_CCR]
+ /* Pass exc_ret to __mmap_switched */
+ mov r0, r10
#endif /* CONFIG_CPU_CP15 elif CONFIG_CPU_V7M */
ret lr
ENDPROC(__after_proc_init)
@@ -175,19 +212,33 @@ ENDPROC(__after_proc_init)
#ifdef CONFIG_ARM_MPU
+#ifndef CONFIG_CPU_V7M
/* Set which MPU region should be programmed */
-.macro set_region_nr tmp, rgnr
+.macro set_region_nr tmp, rgnr, unused
mov \tmp, \rgnr @ Use static region numbers
mcr p15, 0, \tmp, c6, c2, 0 @ Write RGNR
.endm
/* Setup a single MPU region, either D or I side (D-side for unified) */
-.macro setup_region bar, acr, sr, side = MPU_DATA_SIDE
+.macro setup_region bar, acr, sr, side = PMSAv7_DATA_SIDE, unused
mcr p15, 0, \bar, c6, c1, (0 + \side) @ I/DRBAR
mcr p15, 0, \acr, c6, c1, (4 + \side) @ I/DRACR
mcr p15, 0, \sr, c6, c1, (2 + \side) @ I/DRSR
.endm
+#else
+.macro set_region_nr tmp, rgnr, base
+ mov \tmp, \rgnr
+ str \tmp, [\base, #PMSAv7_RNR]
+.endm
+.macro setup_region bar, acr, sr, unused, base
+ lsl \acr, \acr, #16
+ orr \acr, \acr, \sr
+ str \bar, [\base, #PMSAv7_RBAR]
+ str \acr, [\base, #PMSAv7_RASR]
+.endm
+
+#endif
/*
* Setup the MPU and initial MPU Regions. We create the following regions:
* Region 0: Use this for probing the MPU details, so leave disabled.
@@ -195,70 +246,291 @@ ENDPROC(__after_proc_init)
* Region 2: Normal, Shared, cacheable for RAM. From PHYS_OFFSET, size from r6
* Region 3: Normal, shared, inaccessible from PL0 to protect the vectors page
*
- * r6: Value to be written to DRSR (and IRSR if required) for MPU_RAM_REGION
+ * r6: Value to be written to DRSR (and IRSR if required) for PMSAv7_RAM_REGION
*/
+ __HEAD
ENTRY(__setup_mpu)
/* Probe for v7 PMSA compliance */
- mrc p15, 0, r0, c0, c1, 4 @ Read ID_MMFR0
+M_CLASS(movw r12, #:lower16:BASEADDR_V7M_SCB)
+M_CLASS(movt r12, #:upper16:BASEADDR_V7M_SCB)
+
+AR_CLASS(mrc p15, 0, r0, c0, c1, 4) @ Read ID_MMFR0
+M_CLASS(ldr r0, [r12, 0x50])
and r0, r0, #(MMFR0_PMSA) @ PMSA field
teq r0, #(MMFR0_PMSAv7) @ PMSA v7
- bne __error_p @ Fail: ARM_MPU on NOT v7 PMSA
+ beq __setup_pmsa_v7
+ teq r0, #(MMFR0_PMSAv8) @ PMSA v8
+ beq __setup_pmsa_v8
+
+ ret lr
+ENDPROC(__setup_mpu)
+
+ENTRY(__setup_pmsa_v7)
+ /* Calculate the size of a region covering just the kernel */
+ ldr r5, =PLAT_PHYS_OFFSET @ Region start: PHYS_OFFSET
+ ldr r6, =(_end) @ Cover whole kernel
+ sub r6, r6, r5 @ Minimum size of region to map
+ clz r6, r6 @ Region size must be 2^N...
+ rsb r6, r6, #31 @ ...so round up region size
+ lsl r6, r6, #PMSAv7_RSR_SZ @ Put size in right field
+ orr r6, r6, #(1 << PMSAv7_RSR_EN) @ Set region enabled bit
/* Determine whether the D/I-side memory map is unified. We set the
* flags here and continue to use them for the rest of this function */
- mrc p15, 0, r0, c0, c0, 4 @ MPUIR
+AR_CLASS(mrc p15, 0, r0, c0, c0, 4) @ MPUIR
+M_CLASS(ldr r0, [r12, #MPU_TYPE])
ands r5, r0, #MPUIR_DREGION_SZMASK @ 0 size d region => No MPU
- beq __error_p @ Fail: ARM_MPU and no MPU
+ bxeq lr
tst r0, #MPUIR_nU @ MPUIR_nU = 0 for unified
/* Setup second region first to free up r6 */
- set_region_nr r0, #MPU_RAM_REGION
+ set_region_nr r0, #PMSAv7_RAM_REGION, r12
isb
/* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
ldr r0, =PLAT_PHYS_OFFSET @ RAM starts at PHYS_OFFSET
- ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL)
+ ldr r5,=(PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL)
- setup_region r0, r5, r6, MPU_DATA_SIDE @ PHYS_OFFSET, shared, enabled
- beq 1f @ Memory-map not unified
- setup_region r0, r5, r6, MPU_INSTR_SIDE @ PHYS_OFFSET, shared, enabled
+ setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12 @ PHYS_OFFSET, shared, enabled
+ beq 1f @ Memory-map not unified
+ setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12 @ PHYS_OFFSET, shared, enabled
1: isb
/* First/background region */
- set_region_nr r0, #MPU_BG_REGION
+ set_region_nr r0, #PMSAv7_BG_REGION, r12
isb
/* Execute Never, strongly ordered, inaccessible to PL0, rw PL1 */
mov r0, #0 @ BG region starts at 0x0
- ldr r5,=(MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA)
- mov r6, #MPU_RSR_ALL_MEM @ 4GB region, enabled
+ ldr r5,=(PMSAv7_ACR_XN | PMSAv7_RGN_STRONGLY_ORDERED | PMSAv7_AP_PL1RW_PL0NA)
+ mov r6, #PMSAv7_RSR_ALL_MEM @ 4GB region, enabled
- setup_region r0, r5, r6, MPU_DATA_SIDE @ 0x0, BG region, enabled
- beq 2f @ Memory-map not unified
- setup_region r0, r5, r6, MPU_INSTR_SIDE @ 0x0, BG region, enabled
+ setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12 @ 0x0, BG region, enabled
+ beq 2f @ Memory-map not unified
+	setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12	@ 0x0, BG region, enabled
2: isb
- /* Vectors region */
- set_region_nr r0, #MPU_VECTORS_REGION
+#ifdef CONFIG_XIP_KERNEL
+ set_region_nr r0, #PMSAv7_ROM_REGION, r12
isb
- /* Shared, inaccessible to PL0, rw PL1 */
- mov r0, #CONFIG_VECTORS_BASE @ Cover from VECTORS_BASE
- ldr r5,=(MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL)
- /* Writing N to bits 5:1 (RSR_SZ) --> region size 2^N+1 */
- mov r6, #(((2 * PAGE_SHIFT - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN)
-
- setup_region r0, r5, r6, MPU_DATA_SIDE @ VECTORS_BASE, PL0 NA, enabled
- beq 3f @ Memory-map not unified
- setup_region r0, r5, r6, MPU_INSTR_SIDE @ VECTORS_BASE, PL0 NA, enabled
+
+ ldr r5,=(PMSAv7_AP_PL1RO_PL0NA | PMSAv7_RGN_NORMAL)
+
+ ldr r0, =CONFIG_XIP_PHYS_ADDR @ ROM start
+ ldr r6, =(_exiprom) @ ROM end
+ sub r6, r6, r0 @ Minimum size of region to map
+ clz r6, r6 @ Region size must be 2^N...
+ rsb r6, r6, #31 @ ...so round up region size
+ lsl r6, r6, #PMSAv7_RSR_SZ @ Put size in right field
+ orr r6, r6, #(1 << PMSAv7_RSR_EN) @ Set region enabled bit
+
+ setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled
+ beq 3f @ Memory-map not unified
+ setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled
3: isb
+#endif
+ ret lr
+ENDPROC(__setup_pmsa_v7)
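
The clz/rsb/lsl/orr sequence used twice above packs the region size into DRSR in four instructions. The same computation in C, mirroring the instruction sequence rather than minimizing the region (per the vectors-region comment removed below, the SZ field sits in bits 5:1 and a field value N selects a 2^(N+1)-byte region):

#include <stdint.h>

static uint32_t pmsav7_drsr(uint32_t start, uint32_t end)
{
	uint32_t len = end - start;
	uint32_t n = 31 - __builtin_clz(len);	/* clz; rsb rN, rN, #31 */

	/* lsl #PMSAv7_RSR_SZ; orr #(1 << PMSAv7_RSR_EN) */
	return (n << 1) | 1u;
}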
- /* Enable the MPU */
- mrc p15, 0, r0, c1, c0, 0 @ Read SCTLR
- bic r0, r0, #CR_BR @ Disable the 'default mem-map'
- orr r0, r0, #CR_M @ Set SCTRL.M (MPU on)
- mcr p15, 0, r0, c1, c0, 0 @ Enable MPU
+ENTRY(__setup_pmsa_v8)
+ mov r0, #0
+AR_CLASS(mcr p15, 0, r0, c6, c2, 1) @ PRSEL
+M_CLASS(str r0, [r12, #PMSAv8_RNR])
isb
+
+#ifdef CONFIG_XIP_KERNEL
+ ldr r5, =CONFIG_XIP_PHYS_ADDR @ ROM start
+ ldr r6, =(_exiprom) @ ROM end
+ sub r6, r6, #1
+ bic r6, r6, #(PMSAv8_MINALIGN - 1)
+
+ orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
+ orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)
+
+AR_CLASS(mcr p15, 0, r5, c6, c8, 0) @ PRBAR0
+AR_CLASS(mcr p15, 0, r6, c6, c8, 1) @ PRLAR0
+M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(0)])
+M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(0)])
+#endif
+
+ ldr r5, =KERNEL_START
+ ldr r6, =KERNEL_END
+ sub r6, r6, #1
+ bic r6, r6, #(PMSAv8_MINALIGN - 1)
+
+ orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
+ orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)
+
+AR_CLASS(mcr p15, 0, r5, c6, c8, 4) @ PRBAR1
+AR_CLASS(mcr p15, 0, r6, c6, c8, 5) @ PRLAR1
+M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(1)])
+M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(1)])
+
+ /* Setup Background: 0x0 - min(KERNEL_START, XIP_PHYS_ADDR) */
+#ifdef CONFIG_XIP_KERNEL
+ ldr r6, =KERNEL_START
+ ldr r5, =CONFIG_XIP_PHYS_ADDR
+ cmp r6, r5
+ movcs r6, r5
+#else
+ ldr r6, =KERNEL_START
+#endif
+ cmp r6, #0
+ beq 1f
+
+ mov r5, #0
+ sub r6, r6, #1
+ bic r6, r6, #(PMSAv8_MINALIGN - 1)
+
+ orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
+ orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
+
+AR_CLASS(mcr p15, 0, r5, c6, c9, 0) @ PRBAR2
+AR_CLASS(mcr p15, 0, r6, c6, c9, 1) @ PRLAR2
+M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(2)])
+M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(2)])
+
+1:
+ /* Setup Background: max(KERNEL_END, _exiprom) - 0xffffffff */
+#ifdef CONFIG_XIP_KERNEL
+ ldr r5, =KERNEL_END
+ ldr r6, =(_exiprom)
+ cmp r5, r6
+ movcc r5, r6
+#else
+ ldr r5, =KERNEL_END
+#endif
+ mov r6, #0xffffffff
+ bic r6, r6, #(PMSAv8_MINALIGN - 1)
+
+ orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
+ orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
+
+AR_CLASS(mcr p15, 0, r5, c6, c9, 4) @ PRBAR3
+AR_CLASS(mcr p15, 0, r6, c6, c9, 5) @ PRLAR3
+M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(3)])
+M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(3)])
+
+#ifdef CONFIG_XIP_KERNEL
+ /* Setup Background: min(_exiprom, KERNEL_END) - max(KERNEL_START, XIP_PHYS_ADDR) */
+ ldr r5, =(_exiprom)
+ ldr r6, =KERNEL_END
+ cmp r5, r6
+ movcs r5, r6
+
+ ldr r6, =KERNEL_START
+ ldr r0, =CONFIG_XIP_PHYS_ADDR
+ cmp r6, r0
+ movcc r6, r0
+
+ sub r6, r6, #1
+ bic r6, r6, #(PMSAv8_MINALIGN - 1)
+
+ orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
+ orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
+
+#ifdef CONFIG_CPU_V7M
+ /* There is no alias for n == 4 */
+ mov r0, #4
+ str r0, [r12, #PMSAv8_RNR] @ PRSEL
+ isb
+
+ str r5, [r12, #PMSAv8_RBAR_A(0)]
+ str r6, [r12, #PMSAv8_RLAR_A(0)]
+#else
+ mcr p15, 0, r5, c6, c10, 0 @ PRBAR4
+ mcr p15, 0, r6, c6, c10, 1 @ PRLAR4
+#endif
+#endif
ret lr
-ENDPROC(__setup_mpu)
+ENDPROC(__setup_pmsa_v8)
+
+#ifdef CONFIG_SMP
+/*
+ * r6: pointer at mpu_rgn_info
+ */
+
+ .text
+ENTRY(__secondary_setup_mpu)
+ /* Use MPU region info supplied by __cpu_up */
+ ldr r6, [r7] @ get secondary_data.mpu_rgn_info
+
+ /* Probe for v7 PMSA compliance */
+ mrc p15, 0, r0, c0, c1, 4 @ Read ID_MMFR0
+ and r0, r0, #(MMFR0_PMSA) @ PMSA field
+ teq r0, #(MMFR0_PMSAv7) @ PMSA v7
+ beq __secondary_setup_pmsa_v7
+ teq r0, #(MMFR0_PMSAv8) @ PMSA v8
+ beq __secondary_setup_pmsa_v8
+ b __error_p
+ENDPROC(__secondary_setup_mpu)
+
+/*
+ * r6: pointer at mpu_rgn_info
+ */
+ENTRY(__secondary_setup_pmsa_v7)
+ /* Determine whether the D/I-side memory map is unified. We set the
+ * flags here and continue to use them for the rest of this function */
+ mrc p15, 0, r0, c0, c0, 4 @ MPUIR
+ ands r5, r0, #MPUIR_DREGION_SZMASK @ 0 size d region => No MPU
+ beq __error_p
+
+ ldr r4, [r6, #MPU_RNG_INFO_USED]
+ mov r5, #MPU_RNG_SIZE
+ add r3, r6, #MPU_RNG_INFO_RNGS
+ mla r3, r4, r5, r3
+
+1:
+ tst r0, #MPUIR_nU @ MPUIR_nU = 0 for unified
+ sub r3, r3, #MPU_RNG_SIZE
+ sub r4, r4, #1
+
+ set_region_nr r0, r4
+ isb
+
+ ldr r0, [r3, #MPU_RGN_DRBAR]
+ ldr r6, [r3, #MPU_RGN_DRSR]
+ ldr r5, [r3, #MPU_RGN_DRACR]
+
+ setup_region r0, r5, r6, PMSAv7_DATA_SIDE
+ beq 2f
+ setup_region r0, r5, r6, PMSAv7_INSTR_SIDE
+2: isb
+
+ mrc p15, 0, r0, c0, c0, 4 @ Reevaluate the MPUIR
+ cmp r4, #0
+ bgt 1b
+
+ ret lr
+ENDPROC(__secondary_setup_pmsa_v7)
+
+ENTRY(__secondary_setup_pmsa_v8)
+ ldr r4, [r6, #MPU_RNG_INFO_USED]
+#ifndef CONFIG_XIP_KERNEL
+ add r4, r4, #1
#endif
+ mov r5, #MPU_RNG_SIZE
+ add r3, r6, #MPU_RNG_INFO_RNGS
+ mla r3, r4, r5, r3
+
+1:
+ sub r3, r3, #MPU_RNG_SIZE
+ sub r4, r4, #1
+
+ mcr p15, 0, r4, c6, c2, 1 @ PRSEL
+ isb
+
+ ldr r5, [r3, #MPU_RGN_PRBAR]
+ ldr r6, [r3, #MPU_RGN_PRLAR]
+
+ mcr p15, 0, r5, c6, c3, 0 @ PRBAR
+ mcr p15, 0, r6, c6, c3, 1 @ PRLAR
+
+ cmp r4, #0
+ bgt 1b
+
+ ret lr
+ENDPROC(__secondary_setup_pmsa_v8)
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_ARM_MPU */
#include "head-common.S"
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 04286fd9e09c..f22c50d4bd41 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/kernel/head.S
*
@@ -5,28 +6,23 @@
* Copyright (c) 2003 ARM Limited
* All Rights Reserved
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Kernel startup code for all 32-bit CPUs
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/cp15.h>
#include <asm/domain.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/thread_info.h>
-#include <asm/pgtable.h>
#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
#endif
-
/*
* swapper_pg_dir is the virtual address of the initial page table.
* We place the page tables 16K below KERNEL_RAM_VADDR. Therefore, we must
@@ -34,7 +30,7 @@
* the least significant 16 bits to be 0x8000, but we could probably
* relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
*/
-#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)
+#define KERNEL_RAM_VADDR (KERNEL_OFFSET + TEXT_OFFSET)
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif
@@ -42,15 +38,32 @@
#ifdef CONFIG_ARM_LPAE
/* LPAE requires an additional page for the PGD */
#define PG_DIR_SIZE 0x5000
-#define PMD_ORDER 3
+#define PMD_ENTRY_ORDER 3 /* PMD entry size is 2^PMD_ENTRY_ORDER */
#else
#define PG_DIR_SIZE 0x4000
-#define PMD_ORDER 2
+#define PMD_ENTRY_ORDER 2
#endif
.globl swapper_pg_dir
.equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
+ /*
+ * This needs to be assigned at runtime when the linker symbols are
+	 * resolved. These are really unsigned 64-bit quantities, but in this
+	 * assembly code we store each as a pair of 32-bit words.
+ */
+ .pushsection .data
+ .align 2
+ .globl kernel_sec_start
+ .globl kernel_sec_end
+kernel_sec_start:
+ .long 0
+ .long 0
+kernel_sec_end:
+ .long 0
+ .long 0
+ .popsection
+
.macro pgtbl, rd, phys
add \rd, \phys, #TEXT_OFFSET
sub \rd, \rd, #PG_DIR_SIZE
@@ -106,10 +119,8 @@ ENTRY(stext)
#endif
#ifndef CONFIG_XIP_KERNEL
- adr r3, 2f
- ldmia r3, {r4, r8}
- sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET)
- add r8, r8, r4 @ PHYS_OFFSET
+ adr_l r8, _text @ __pa(_text)
+ sub r8, r8, #TEXT_OFFSET @ PHYS_OFFSET
#else
ldr r8, =PLAT_PHYS_OFFSET @ always constant in this case
#endif
@@ -161,10 +172,6 @@ ENTRY(stext)
1: b __enable_mmu
ENDPROC(stext)
.ltorg
-#ifndef CONFIG_XIP_KERNEL
-2: .long .
- .long PAGE_OFFSET
-#endif
/*
* Setup the initial page tables. We only setup the barest
@@ -227,33 +234,49 @@ __create_page_tables:
* Create identity mapping to cater for __enable_mmu.
* This identity mapping will be removed by paging_init().
*/
- adr r0, __turn_mmu_on_loc
- ldmia r0, {r3, r5, r6}
- sub r0, r0, r3 @ virt->phys offset
- add r5, r5, r0 @ phys __turn_mmu_on
- add r6, r6, r0 @ phys __turn_mmu_on_end
+ adr_l r5, __turn_mmu_on @ _pa(__turn_mmu_on)
+ adr_l r6, __turn_mmu_on_end @ _pa(__turn_mmu_on_end)
mov r5, r5, lsr #SECTION_SHIFT
mov r6, r6, lsr #SECTION_SHIFT
1: orr r3, r7, r5, lsl #SECTION_SHIFT @ flags + kernel base
- str r3, [r4, r5, lsl #PMD_ORDER] @ identity mapping
+ str r3, [r4, r5, lsl #PMD_ENTRY_ORDER] @ identity mapping
cmp r5, r6
addlo r5, r5, #1 @ next section
blo 1b
/*
- * Map our RAM from the start to the end of the kernel .bss section.
+ * The main matter: map in the kernel using section mappings, and
+ * set two variables to indicate the physical start and end of the
+ * kernel.
*/
- add r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
+ add r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
ldr r6, =(_end - 1)
- orr r3, r8, r7
- add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
-1: str r3, [r0], #1 << PMD_ORDER
+
+ /* For XIP, kernel_sec_start/kernel_sec_end are currently in RO memory */
+#ifndef CONFIG_XIP_KERNEL
+ adr_l r5, kernel_sec_start @ _pa(kernel_sec_start)
+#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
+ str r8, [r5, #4] @ Save physical start of kernel (BE)
+#else
+ str r8, [r5] @ Save physical start of kernel (LE)
+#endif
+#endif
+ orr r3, r8, r7 @ Add the MMU flags
+ add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER)
+1: str r3, [r0], #1 << PMD_ENTRY_ORDER
add r3, r3, #1 << SECTION_SHIFT
cmp r0, r6
bls 1b
-
-#ifdef CONFIG_XIP_KERNEL
+#ifndef CONFIG_XIP_KERNEL
+ eor r3, r3, r7 @ Remove the MMU flags
+ adr_l r5, kernel_sec_end @ _pa(kernel_sec_end)
+#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
+ str r3, [r5, #4] @ Save physical end of kernel (BE)
+#else
+ str r3, [r5] @ Save physical end of kernel (LE)
+#endif
+#else
/*
* Map the kernel image separately as it is not located in RAM.
*/
@@ -261,14 +284,14 @@ __create_page_tables:
mov r3, pc
mov r3, r3, lsr #SECTION_SHIFT
orr r3, r7, r3, lsl #SECTION_SHIFT
- add r0, r4, #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
- str r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
+ add r0, r4, #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
+ str r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ENTRY_ORDER]!
ldr r6, =(_edata_loc - 1)
- add r0, r0, #1 << PMD_ORDER
- add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
+ add r0, r0, #1 << PMD_ENTRY_ORDER
+ add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER)
1: cmp r0, r6
add r3, r3, #1 << SECTION_SHIFT
- strls r3, [r0], #1 << PMD_ORDER
+ strls r3, [r0], #1 << PMD_ENTRY_ORDER
bls 1b
#endif
@@ -277,12 +300,11 @@ __create_page_tables:
* We map 2 sections in case the ATAGs/DTB crosses a section boundary.
*/
mov r0, r2, lsr #SECTION_SHIFT
- movs r0, r0, lsl #SECTION_SHIFT
- subne r3, r0, r8
- addne r3, r3, #PAGE_OFFSET
- addne r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
- orrne r6, r7, r0
- strne r6, [r3], #1 << PMD_ORDER
+ cmp r2, #0
+ ldrne r3, =FDT_FIXED_BASE >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
+ addne r3, r3, r4
+ orrne r6, r7, r0, lsl #SECTION_SHIFT
+ strne r6, [r3], #1 << PMD_ENTRY_ORDER
addne r6, r6, #1 << SECTION_SHIFT
strne r6, [r3]
@@ -301,7 +323,7 @@ __create_page_tables:
addruart r7, r3, r0
mov r3, r3, lsr #SECTION_SHIFT
- mov r3, r3, lsl #PMD_ORDER
+ mov r3, r3, lsl #PMD_ENTRY_ORDER
add r0, r4, r3
mov r3, r7, lsr #SECTION_SHIFT
@@ -326,12 +348,12 @@ __create_page_tables:
ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
#endif
-#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
+#if defined(CONFIG_ARCH_NETWINDER)
/*
 * If we're using the NetWinder, we also need to map
* in the 16550-type serial port for the debug messages
*/
- add r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ORDER)
+ add r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
orr r3, r7, #0x7c000000
str r3, [r0]
#endif
@@ -341,10 +363,10 @@ __create_page_tables:
* Similar reasons here - for debug. This is
* only for Acorn RiscPC architectures.
*/
- add r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ORDER)
+ add r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
orr r3, r7, #0x02000000
str r3, [r0]
- add r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER)
+ add r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
str r3, [r0]
#endif
#endif
@@ -354,11 +376,6 @@ __create_page_tables:
ret lr
ENDPROC(__create_page_tables)
.ltorg
- .align
-__turn_mmu_on_loc:
- .long .
- .long __turn_mmu_on
- .long __turn_mmu_on_end
#if defined(CONFIG_SMP)
.text
@@ -394,11 +411,13 @@ ENTRY(secondary_startup)
/*
* Use the page tables supplied from __cpu_up.
*/
- adr r4, __secondary_data
- ldmia r4, {r5, r7, r12} @ address to jump to after
- sub lr, r4, r5 @ mmu has been enabled
- add r3, r7, lr
- ldrd r4, [r3, #0] @ get secondary_data.pgdir
+#ifdef CONFIG_XIP_KERNEL
+ ldr r3, =(secondary_data + PLAT_PHYS_OFFSET - PAGE_OFFSET)
+#else
+ adr_l r3, secondary_data
+#endif
+ mov_l r12, __secondary_switched
+ ldrd r4, r5, [r3, #0] @ get secondary_data.pgdir
ARM_BE8(eor r4, r4, r5) @ Swap r5 and r4 in BE:
ARM_BE8(eor r5, r4, r5) @ it can be done in 3 steps
ARM_BE8(eor r4, r4, r5) @ without using a temp reg.
@@ -412,22 +431,21 @@ ARM_BE8(eor r4, r4, r5) @ without using a temp reg.
ENDPROC(secondary_startup)
ENDPROC(secondary_startup_arm)
- /*
- * r6 = &secondary_data
- */
ENTRY(__secondary_switched)
- ldr sp, [r7, #12] @ get secondary_data.stack
+#if defined(CONFIG_VMAP_STACK) && !defined(CONFIG_ARM_LPAE)
+ @ Before using the vmap'ed stack, we have to switch to swapper_pg_dir
+ @ as the ID map does not cover the vmalloc region.
+ mrc p15, 0, ip, c2, c0, 1 @ read TTBR1
+ mcr p15, 0, ip, c2, c0, 0 @ set TTBR0
+ instr_sync
+#endif
+ adr_l r7, secondary_data + 12 @ get secondary_data.stack
+ ldr sp, [r7]
+ ldr r0, [r7, #4] @ get secondary_data.task
mov fp, #0
b secondary_start_kernel
ENDPROC(__secondary_switched)
- .align
-
- .type __secondary_data, %object
-__secondary_data:
- .long .
- .long secondary_data
- .long __secondary_switched
#endif /* defined(CONFIG_SMP) */
@@ -542,20 +560,13 @@ ARM_BE8(rev r0, r0) @ byteswap if big endian
retne lr
__fixup_smp_on_up:
- adr r0, 1f
- ldmia r0, {r3 - r5}
- sub r3, r0, r3
- add r4, r4, r3
- add r5, r5, r3
+ adr_l r4, __smpalt_begin
+ adr_l r5, __smpalt_end
b __do_fixup_smp_on_up
ENDPROC(__fixup_smp)
- .align
-1: .word .
- .word __smpalt_begin
- .word __smpalt_end
-
.pushsection .data
+ .align 2
.globl smp_on_up
smp_on_up:
ALT_SMP(.long 1)
@@ -567,14 +578,15 @@ smp_on_up:
__do_fixup_smp_on_up:
cmp r4, r5
reths lr
- ldmia r4!, {r0, r6}
- ARM( str r6, [r0, r3] )
- THUMB( add r0, r0, r3 )
+ ldmia r4, {r0, r6}
+ ARM( str r6, [r0, r4] )
+ THUMB( add r0, r0, r4 )
+ add r4, r4, #8
#ifdef __ARMEB__
THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian.
#endif
THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords
- THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3.
+ THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r0.
THUMB( strh r6, [r0] )
b __do_fixup_smp_on_up
ENDPROC(__do_fixup_smp_on_up)
@@ -583,150 +595,8 @@ ENTRY(fixup_smp)
stmfd sp!, {r4 - r6, lr}
mov r4, r0
add r5, r0, r1
- mov r3, #0
bl __do_fixup_smp_on_up
ldmfd sp!, {r4 - r6, pc}
ENDPROC(fixup_smp)
-#ifdef __ARMEB__
-#define LOW_OFFSET 0x4
-#define HIGH_OFFSET 0x0
-#else
-#define LOW_OFFSET 0x0
-#define HIGH_OFFSET 0x4
-#endif
-
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
-
-/* __fixup_pv_table - patch the stub instructions with the delta between
- * PHYS_OFFSET and PAGE_OFFSET, which is assumed to be 16MiB aligned and
- * can be expressed by an immediate shifter operand. The stub instruction
- * has a form of '(add|sub) rd, rn, #imm'.
- */
- __HEAD
-__fixup_pv_table:
- adr r0, 1f
- ldmia r0, {r3-r7}
- mvn ip, #0
- subs r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET
- add r4, r4, r3 @ adjust table start address
- add r5, r5, r3 @ adjust table end address
- add r6, r6, r3 @ adjust __pv_phys_pfn_offset address
- add r7, r7, r3 @ adjust __pv_offset address
- mov r0, r8, lsr #PAGE_SHIFT @ convert to PFN
- str r0, [r6] @ save computed PHYS_OFFSET to __pv_phys_pfn_offset
- strcc ip, [r7, #HIGH_OFFSET] @ save to __pv_offset high bits
- mov r6, r3, lsr #24 @ constant for add/sub instructions
- teq r3, r6, lsl #24 @ must be 16MiB aligned
-THUMB( it ne @ cross section branch )
- bne __error
- str r3, [r7, #LOW_OFFSET] @ save to __pv_offset low bits
- b __fixup_a_pv_table
-ENDPROC(__fixup_pv_table)
-
- .align
-1: .long .
- .long __pv_table_begin
- .long __pv_table_end
-2: .long __pv_phys_pfn_offset
- .long __pv_offset
-
- .text
-__fixup_a_pv_table:
- adr r0, 3f
- ldr r6, [r0]
- add r6, r6, r3
- ldr r0, [r6, #HIGH_OFFSET] @ pv_offset high word
- ldr r6, [r6, #LOW_OFFSET] @ pv_offset low word
- mov r6, r6, lsr #24
- cmn r0, #1
-#ifdef CONFIG_THUMB2_KERNEL
- moveq r0, #0x200000 @ set bit 21, mov to mvn instruction
- lsls r6, #24
- beq 2f
- clz r7, r6
- lsr r6, #24
- lsl r6, r7
- bic r6, #0x0080
- lsrs r7, #1
- orrcs r6, #0x0080
- orr r6, r6, r7, lsl #12
- orr r6, #0x4000
- b 2f
-1: add r7, r3
- ldrh ip, [r7, #2]
-ARM_BE8(rev16 ip, ip)
- tst ip, #0x4000
- and ip, #0x8f00
- orrne ip, r6 @ mask in offset bits 31-24
- orreq ip, r0 @ mask in offset bits 7-0
-ARM_BE8(rev16 ip, ip)
- strh ip, [r7, #2]
- bne 2f
- ldrh ip, [r7]
-ARM_BE8(rev16 ip, ip)
- bic ip, #0x20
- orr ip, ip, r0, lsr #16
-ARM_BE8(rev16 ip, ip)
- strh ip, [r7]
-2: cmp r4, r5
- ldrcc r7, [r4], #4 @ use branch for delay slot
- bcc 1b
- bx lr
-#else
-#ifdef CONFIG_CPU_ENDIAN_BE8
- moveq r0, #0x00004000 @ set bit 22, mov to mvn instruction
-#else
- moveq r0, #0x400000 @ set bit 22, mov to mvn instruction
-#endif
- b 2f
-1: ldr ip, [r7, r3]
-#ifdef CONFIG_CPU_ENDIAN_BE8
- @ in BE8, we load data in BE, but instructions still in LE
- bic ip, ip, #0xff000000
- tst ip, #0x000f0000 @ check the rotation field
- orrne ip, ip, r6, lsl #24 @ mask in offset bits 31-24
- biceq ip, ip, #0x00004000 @ clear bit 22
- orreq ip, ip, r0 @ mask in offset bits 7-0
-#else
- bic ip, ip, #0x000000ff
- tst ip, #0xf00 @ check the rotation field
- orrne ip, ip, r6 @ mask in offset bits 31-24
- biceq ip, ip, #0x400000 @ clear bit 22
- orreq ip, ip, r0 @ mask in offset bits 7-0
-#endif
- str ip, [r7, r3]
-2: cmp r4, r5
- ldrcc r7, [r4], #4 @ use branch for delay slot
- bcc 1b
- ret lr
-#endif
-ENDPROC(__fixup_a_pv_table)
-
- .align
-3: .long __pv_offset
-
-ENTRY(fixup_pv_table)
- stmfd sp!, {r4 - r7, lr}
- mov r3, #0 @ no offset
- mov r4, r0 @ r0 = table start
- add r5, r0, r1 @ r1 = table size
- bl __fixup_a_pv_table
- ldmfd sp!, {r4 - r7, pc}
-ENDPROC(fixup_pv_table)
-
- .data
- .globl __pv_phys_pfn_offset
- .type __pv_phys_pfn_offset, %object
-__pv_phys_pfn_offset:
- .word 0
- .size __pv_phys_pfn_offset, . -__pv_phys_pfn_offset
-
- .globl __pv_offset
- .type __pv_offset, %object
-__pv_offset:
- .quad 0
- .size __pv_offset, . -__pv_offset
-#endif
-
#include "head-common.S"
diff --git a/arch/arm/kernel/head.h b/arch/arm/kernel/head.h
new file mode 100644
index 000000000000..0eb5accf7141
--- /dev/null
+++ b/arch/arm/kernel/head.h
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+extern char __data_loc[];
+extern char _edata_loc[];
+extern char _sdata[];
+
+int __init __inflate_kernel_data(void);
diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c
index b09561a6d06a..38a90a3d12b2 100644
--- a/arch/arm/kernel/hibernate.c
+++ b/arch/arm/kernel/hibernate.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Hibernation support specific for ARM
*
@@ -11,8 +12,6 @@
* https://patchwork.kernel.org/patch/96442/
*
* Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
- *
- * License terms: GNU General Public License (GPL) version 2
*/
#include <linux/mm.h>
@@ -20,7 +19,7 @@
#include <asm/system_misc.h>
#include <asm/idmap.h>
#include <asm/suspend.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/sections.h>
#include "reboot.h"
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index af2a7f1e3103..cd4b34c96e35 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1,16 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) 2009, 2010 ARM Limited
*
@@ -28,6 +17,7 @@
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/smp.h>
+#include <linux/cfi.h>
#include <linux/cpu_pm.h>
#include <linux/coresight.h>
@@ -44,17 +34,17 @@ static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);
/* Number of BRP/WRP registers on this CPU. */
-static int core_num_brps;
-static int core_num_wrps;
+static int core_num_brps __ro_after_init;
+static int core_num_wrps __ro_after_init;
/* Debug architecture version. */
-static u8 debug_arch;
+static u8 debug_arch __ro_after_init;
/* Does debug architecture support OS Save and Restore? */
-static bool has_ossr;
+static bool has_ossr __ro_after_init;
/* Maximum supported watchpoint length. */
-static u8 max_watchpoint_len;
+static u8 max_watchpoint_len __ro_after_init;
#define READ_WB_REG_CASE(OP2, M, VAL) \
case ((OP2 << 4) + M): \
@@ -257,6 +247,9 @@ static int enable_monitor_mode(void)
case ARM_DEBUG_ARCH_V7_ECP14:
case ARM_DEBUG_ARCH_V7_1:
case ARM_DEBUG_ARCH_V8:
+ case ARM_DEBUG_ARCH_V8_1:
+ case ARM_DEBUG_ARCH_V8_2:
+ case ARM_DEBUG_ARCH_V8_4:
ARM_DBG_WRITE(c0, c2, 2, (dscr | ARM_DSCR_MDBGEN));
isb();
break;
@@ -456,14 +449,13 @@ static int get_hbp_len(u8 hbp_len)
/*
* Check whether bp virtual address is in kernel space.
*/
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
unsigned int len;
unsigned long va;
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- va = info->address;
- len = get_hbp_len(info->ctrl.len);
+ va = hw->address;
+ len = get_hbp_len(hw->ctrl.len);
return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
@@ -518,44 +510,45 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
/*
* Construct an arch_hw_breakpoint from a perf_event.
*/
-static int arch_build_bp_info(struct perf_event *bp)
+static int arch_build_bp_info(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
/* Type */
- switch (bp->attr.bp_type) {
+ switch (attr->bp_type) {
case HW_BREAKPOINT_X:
- info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
+ hw->ctrl.type = ARM_BREAKPOINT_EXECUTE;
break;
case HW_BREAKPOINT_R:
- info->ctrl.type = ARM_BREAKPOINT_LOAD;
+ hw->ctrl.type = ARM_BREAKPOINT_LOAD;
break;
case HW_BREAKPOINT_W:
- info->ctrl.type = ARM_BREAKPOINT_STORE;
+ hw->ctrl.type = ARM_BREAKPOINT_STORE;
break;
case HW_BREAKPOINT_RW:
- info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
+ hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
break;
default:
return -EINVAL;
}
/* Len */
- switch (bp->attr.bp_len) {
+ switch (attr->bp_len) {
case HW_BREAKPOINT_LEN_1:
- info->ctrl.len = ARM_BREAKPOINT_LEN_1;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_1;
break;
case HW_BREAKPOINT_LEN_2:
- info->ctrl.len = ARM_BREAKPOINT_LEN_2;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_2;
break;
case HW_BREAKPOINT_LEN_4:
- info->ctrl.len = ARM_BREAKPOINT_LEN_4;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
break;
case HW_BREAKPOINT_LEN_8:
- info->ctrl.len = ARM_BREAKPOINT_LEN_8;
- if ((info->ctrl.type != ARM_BREAKPOINT_EXECUTE)
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_8;
+ if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE)
&& max_watchpoint_len >= 8)
break;
+ fallthrough;
default:
return -EINVAL;
}
@@ -566,24 +559,24 @@ static int arch_build_bp_info(struct perf_event *bp)
* by the hardware and must be aligned to the appropriate number of
* bytes.
*/
- if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
- info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
- info->ctrl.len != ARM_BREAKPOINT_LEN_4)
+ if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
+ hw->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
+ hw->ctrl.len != ARM_BREAKPOINT_LEN_4)
return -EINVAL;
/* Address */
- info->address = bp->attr.bp_addr;
+ hw->address = attr->bp_addr;
/* Privilege */
- info->ctrl.privilege = ARM_BREAKPOINT_USER;
- if (arch_check_bp_in_kernelspace(bp))
- info->ctrl.privilege |= ARM_BREAKPOINT_PRIV;
+ hw->ctrl.privilege = ARM_BREAKPOINT_USER;
+ if (arch_check_bp_in_kernelspace(hw))
+ hw->ctrl.privilege |= ARM_BREAKPOINT_PRIV;
/* Enabled? */
- info->ctrl.enabled = !bp->attr.disabled;
+ hw->ctrl.enabled = !attr->disabled;
/* Mismatch */
- info->ctrl.mismatch = 0;
+ hw->ctrl.mismatch = 0;
return 0;
}
@@ -591,9 +584,10 @@ static int arch_build_bp_info(struct perf_event *bp)
/*
* Validate the arch-specific HW Breakpoint register settings.
*/
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
int ret = 0;
u32 offset, alignment_mask = 0x3;
@@ -602,14 +596,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
return -ENODEV;
/* Build the arch_hw_breakpoint. */
- ret = arch_build_bp_info(bp);
+ ret = arch_build_bp_info(bp, attr, hw);
if (ret)
goto out;
/* Check address alignment. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8)
alignment_mask = 0x7;
- offset = info->address & alignment_mask;
+ offset = hw->address & alignment_mask;
switch (offset) {
case 0:
/* Aligned */
@@ -617,19 +611,21 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
case 1:
case 2:
/* Allow halfword watchpoints and breakpoints. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
+ fallthrough;
case 3:
/* Allow single byte watchpoint. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
break;
+ fallthrough;
default:
ret = -EINVAL;
goto out;
}
- info->address &= ~alignment_mask;
- info->ctrl.len <<= offset;
+ hw->address &= ~alignment_mask;
+ hw->ctrl.len <<= offset;
if (is_default_overflow_handler(bp)) {
/*
@@ -640,7 +636,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
return -EINVAL;
/* We don't allow mismatch breakpoints in kernel space. */
- if (arch_check_bp_in_kernelspace(bp))
+ if (arch_check_bp_in_kernelspace(hw))
return -EPERM;
/*
@@ -655,8 +651,8 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
* reports them.
*/
if (!debug_exception_updates_fsr() &&
- (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
- info->ctrl.type == ARM_BREAKPOINT_STORE))
+ (hw->ctrl.type == ARM_BREAKPOINT_LOAD ||
+ hw->ctrl.type == ARM_BREAKPOINT_STORE))
return -EINVAL;
}
@@ -688,26 +684,68 @@ static void disable_single_step(struct perf_event *bp)
arch_install_hw_breakpoint(bp);
}
+/*
+ * Arm32 hardware does not always report a watchpoint hit address that matches
+ * one of the watchpoints set. It can also report an address "near" the
+ * watchpoint if a single instruction accesses both watched and unwatched
+ * addresses. There is no straightforward way, short of disassembling the
+ * offending instruction, to map that address back to the watchpoint. This
+ * function computes the distance of the memory access from the watchpoint as a
+ * heuristic for the likelihood that a given access triggered the watchpoint.
+ *
+ * See this same function in the arm64 platform code, which has the same
+ * problem.
+ *
+ * The function returns the distance of the address from the bytes watched by
+ * the watchpoint. In case of an exact match, it returns 0.
+ */
+static u32 get_distance_from_watchpoint(unsigned long addr, u32 val,
+ struct arch_hw_breakpoint_ctrl *ctrl)
+{
+ u32 wp_low, wp_high;
+ u32 lens, lene;
+
+ lens = __ffs(ctrl->len);
+ lene = __fls(ctrl->len);
+
+ wp_low = val + lens;
+ wp_high = val + lene;
+ if (addr < wp_low)
+ return wp_low - addr;
+ else if (addr > wp_high)
+ return addr - wp_high;
+ else
+ return 0;
+}
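
Note that ctrl->len here is the byte-address-select bitmask, not a byte count, so __ffs()/__fls() recover the first and last watched byte within the aligned word. A self-contained rendering with a worked case, using compiler builtins in place of the kernel helpers:

#include <stdint.h>
#include <stdio.h>

static uint32_t wp_distance(uint32_t addr, uint32_t val, uint32_t len_mask)
{
	uint32_t lo = val + __builtin_ctz(len_mask);		/* __ffs() */
	uint32_t hi = val + 31 - __builtin_clz(len_mask);	/* __fls() */

	if (addr < lo)
		return lo - addr;
	if (addr > hi)
		return addr - hi;
	return 0;
}

int main(void)
{
	/* 2-byte watchpoint on bytes 2-3 of the word: mask 0b1100 */
	printf("%u\n", wp_distance(0x1005, 0x1000, 0xc));	/* prints 2 */
	return 0;
}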
+
+static int watchpoint_fault_on_uaccess(struct pt_regs *regs,
+ struct arch_hw_breakpoint *info)
+{
+ return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER;
+}
+
static void watchpoint_handler(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
- int i, access;
- u32 val, ctrl_reg, alignment_mask;
+ int i, access, closest_match = 0;
+ u32 min_dist = -1, dist;
+ u32 val, ctrl_reg;
struct perf_event *wp, **slots;
struct arch_hw_breakpoint *info;
struct arch_hw_breakpoint_ctrl ctrl;
slots = this_cpu_ptr(wp_on_reg);
+ /*
+ * Find all watchpoints that match the reported address. If no exact
+	 * match is found, attribute the hit to the closest watchpoint.
+ */
+ rcu_read_lock();
for (i = 0; i < core_num_wrps; ++i) {
- rcu_read_lock();
-
wp = slots[i];
-
if (wp == NULL)
- goto unlock;
+ continue;
- info = counter_arch_bp(wp);
/*
* The DFAR is an unknown value on debug architectures prior
* to 7.1. Since we only allow a single watchpoint on these
@@ -716,50 +754,69 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
*/
if (debug_arch < ARM_DEBUG_ARCH_V7_1) {
BUG_ON(i > 0);
+ info = counter_arch_bp(wp);
info->trigger = wp->attr.bp_addr;
} else {
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
- alignment_mask = 0x7;
- else
- alignment_mask = 0x3;
-
- /* Check if the watchpoint value matches. */
- val = read_wb_reg(ARM_BASE_WVR + i);
- if (val != (addr & ~alignment_mask))
- goto unlock;
-
- /* Possible match, check the byte address select. */
- ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
- decode_ctrl_reg(ctrl_reg, &ctrl);
- if (!((1 << (addr & alignment_mask)) & ctrl.len))
- goto unlock;
-
/* Check that the access type matches. */
if (debug_exception_updates_fsr()) {
access = (fsr & ARM_FSR_ACCESS_MASK) ?
HW_BREAKPOINT_W : HW_BREAKPOINT_R;
if (!(access & hw_breakpoint_type(wp)))
- goto unlock;
+ continue;
}
+ val = read_wb_reg(ARM_BASE_WVR + i);
+ ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
+ decode_ctrl_reg(ctrl_reg, &ctrl);
+ dist = get_distance_from_watchpoint(addr, val, &ctrl);
+ if (dist < min_dist) {
+ min_dist = dist;
+ closest_match = i;
+ }
+ /* Is this an exact match? */
+ if (dist != 0)
+ continue;
+
/* We have a winner. */
+ info = counter_arch_bp(wp);
info->trigger = addr;
}
pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
+
+ /*
+ * If we triggered a user watchpoint from a uaccess routine,
+ * then handle the stepping ourselves since userspace really
+ * can't help us with this.
+ */
+ if (watchpoint_fault_on_uaccess(regs, info))
+ goto step;
+
perf_bp_event(wp, regs);
/*
- * If no overflow handler is present, insert a temporary
- * mismatch breakpoint so we can single-step over the
- * watchpoint trigger.
+ * Defer stepping to the overflow handler if one is installed.
+ * Otherwise, insert a temporary mismatch breakpoint so that
+ * we can single-step over the watchpoint trigger.
*/
+ if (!is_default_overflow_handler(wp))
+ continue;
+step:
+ enable_single_step(wp, instruction_pointer(regs));
+ }
+
+ if (min_dist > 0 && min_dist != -1) {
+ /* No exact match found. */
+ wp = slots[closest_match];
+ info = counter_arch_bp(wp);
+ info->trigger = addr;
+ pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
+ perf_bp_event(wp, regs);
if (is_default_overflow_handler(wp))
enable_single_step(wp, instruction_pointer(regs));
-
-unlock:
- rcu_read_unlock();
}
+
+ rcu_read_unlock();
}
static void watchpoint_single_step_handler(unsigned long pc)
@@ -830,7 +887,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
info->trigger = addr;
pr_debug("breakpoint fired: address = 0x%x\n", addr);
perf_bp_event(bp, regs);
- if (!bp->overflow_handler)
+ if (is_default_overflow_handler(bp))
enable_single_step(bp, addr);
goto unlock;
}
@@ -847,6 +904,37 @@ unlock:
watchpoint_single_step_handler(addr);
}
+#ifdef CONFIG_CFI
+static void hw_breakpoint_cfi_handler(struct pt_regs *regs)
+{
+ /*
+ * TODO: implementing target and type to pass to CFI using the more
+ * elaborate report_cfi_failure() requires compiler work. To be able
+ * to properly extract target information the compiler needs to
+ * emit a stable instructions sequence for the CFI checks so we can
+ * decode the instructions preceding the trap and figure out which
+ * registers were used.
+ */
+
+ switch (report_cfi_failure_noaddr(regs, instruction_pointer(regs))) {
+ case BUG_TRAP_TYPE_BUG:
+ die("Oops - CFI", regs, 0);
+ break;
+ case BUG_TRAP_TYPE_WARN:
+ /* Skip the breaking instruction */
+ instruction_pointer(regs) += 4;
+ break;
+ default:
+ die("Unknown CFI error", regs, 0);
+ break;
+ }
+}
+#else
+static void hw_breakpoint_cfi_handler(struct pt_regs *regs)
+{
+}
+#endif
+
/*
* Called from either the Data Abort Handler [watchpoint] or the
* Prefetch Abort Handler [breakpoint] with interrupts disabled.
@@ -872,9 +960,13 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
break;
case ARM_ENTRY_ASYNC_WATCHPOINT:
WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
+ fallthrough;
case ARM_ENTRY_SYNC_WATCHPOINT:
watchpoint_handler(addr, fsr, regs);
break;
+ case ARM_ENTRY_CFI_BREAKPOINT:
+ hw_breakpoint_cfi_handler(regs);
+ break;
default:
ret = 1; /* Unhandled fault. */
}
@@ -884,6 +976,23 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
return ret;
}
+#ifdef CONFIG_ARM_ERRATA_764319
+static int oslsr_fault;
+
+static int debug_oslsr_trap(struct pt_regs *regs, unsigned int instr)
+{
+ oslsr_fault = 1;
+ instruction_pointer(regs) += 4;
+ return 0;
+}
+
+static struct undef_hook debug_oslsr_hook = {
+ .instr_mask = 0xffffffff,
+ .instr_val = 0xee115e91,
+ .fn = debug_oslsr_trap,
+};
+#endif
+
/*
* One-time initialisation.
*/
@@ -917,9 +1026,19 @@ static bool core_has_os_save_restore(void)
case ARM_DEBUG_ARCH_V7_1:
return true;
case ARM_DEBUG_ARCH_V7_ECP14:
+#ifdef CONFIG_ARM_ERRATA_764319
+ oslsr_fault = 0;
+ register_undef_hook(&debug_oslsr_hook);
+ ARM_DBG_READ(c1, c1, 4, oslsr);
+ unregister_undef_hook(&debug_oslsr_hook);
+ if (oslsr_fault)
+ return false;
+#else
ARM_DBG_READ(c1, c1, 4, oslsr);
+#endif
if (oslsr & ARM_OSLSR_OSLM0)
return true;
+ fallthrough;
default:
return false;
}
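The erratum 764319 workaround above probes a debug register read that can spuriously UNDEF on affected Cortex-A9 parts. The hook's instr_val 0xee115e91 decodes to "mrc p14, 0, r5, c1, c1, 4", i.e. the DBGOSLSR read that ARM_DBG_READ(c1, c1, 4, oslsr) is expected to expand to, assuming the compiler allocates r5 as the destination register. If the read traps, debug_oslsr_trap() records the fault and steps the PC over the 4-byte instruction, so core_has_os_save_restore() conservatively reports the feature as absent instead of crashing.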
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index ec7e7377d423..3a506b9095a5 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -1,19 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) 2012 Linaro Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/init.h>
@@ -22,6 +9,8 @@
#include <asm/assembler.h>
#include <asm/virt.h>
+.arch armv7-a
+
#ifndef ZIMAGE
/*
* For the kernel proper, we need to find out the CPU boot mode long after
@@ -31,46 +20,44 @@
* zeroing of .bss would clobber it.
*/
.data
+ .align 2
ENTRY(__boot_cpu_mode)
.long 0
.text
/*
- * Save the primary CPU boot mode. Requires 3 scratch registers.
+ * Save the primary CPU boot mode. Requires 2 scratch registers.
*/
- .macro store_primary_cpu_mode reg1, reg2, reg3
+ .macro store_primary_cpu_mode reg1, reg2
mrs \reg1, cpsr
and \reg1, \reg1, #MODE_MASK
- adr \reg2, .L__boot_cpu_mode_offset
- ldr \reg3, [\reg2]
- str \reg1, [\reg2, \reg3]
+ str_l \reg1, __boot_cpu_mode, \reg2
.endm
/*
* Compare the current mode with the one saved on the primary CPU.
* If they don't match, record that fact. The Z bit indicates
* if there's a match or not.
- * Requires 3 additionnal scratch registers.
+ * Requires 2 additional scratch registers.
*/
- .macro compare_cpu_mode_with_primary mode, reg1, reg2, reg3
- adr \reg2, .L__boot_cpu_mode_offset
- ldr \reg3, [\reg2]
- ldr \reg1, [\reg2, \reg3]
+ .macro compare_cpu_mode_with_primary mode, reg1, reg2
+ adr_l \reg2, __boot_cpu_mode
+ ldr \reg1, [\reg2]
cmp \mode, \reg1 @ matches primary CPU boot mode?
orrne \reg1, \reg1, #BOOT_CPU_MODE_MISMATCH
- strne \reg1, [\reg2, \reg3] @ record what happened and give up
+ strne \reg1, [\reg2] @ record what happened and give up
.endm
#else /* ZIMAGE */
- .macro store_primary_cpu_mode reg1:req, reg2:req, reg3:req
+ .macro store_primary_cpu_mode reg1:req, reg2:req
.endm
/*
 * The zImage loader only runs on one CPU, so we don't bother with multi-CPU
* consistency checking:
*/
- .macro compare_cpu_mode_with_primary mode, reg1, reg2, reg3
+ .macro compare_cpu_mode_with_primary mode, reg1, reg2
cmp \mode, \mode
.endm
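The adr_l/str_l macros (from asm/assembler.h) generate position-independent references to __boot_cpu_mode directly, with no range limitation, which is what lets each macro shed a scratch register here. They subsume the old three-instruction pattern of loading a link-time offset from the .L__boot_cpu_mode_offset literal and applying it to an adr-derived base; the now-unused literal is deleted at the end of this file.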
@@ -85,7 +72,7 @@ ENTRY(__boot_cpu_mode)
*/
@ Call this from the primary CPU
ENTRY(__hyp_stub_install)
- store_primary_cpu_mode r4, r5, r6
+ store_primary_cpu_mode r4, r5
ENDPROC(__hyp_stub_install)
@ fall through...
@@ -99,7 +86,7 @@ ENTRY(__hyp_stub_install_secondary)
* If the secondary has booted with a different mode, give up
* immediately.
*/
- compare_cpu_mode_with_primary r4, r5, r6, r7
+ compare_cpu_mode_with_primary r4, r5, r6
retne lr
/*
@@ -158,10 +145,9 @@ ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE
#if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER)
@ make CNTP_* and CNTPCT accessible from PL1
mrc p15, 0, r7, c0, c1, 1 @ ID_PFR1
- lsr r7, #16
- and r7, #0xf
- cmp r7, #1
- bne 1f
+ ubfx r7, r7, #16, #4
+ teq r7, #0
+ beq 1f
mrc p15, 4, r7, c14, c1, 0 @ CNTHCTL
orr r7, r7, #3 @ PL1PCEN | PL1PCTEN
mcr p15, 4, r7, c14, c1, 0 @ CNTHCTL
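Besides being one instruction shorter, the ubfx/teq form relaxes the test: the ID_PFR1 generic-timer field (bits [19:16]) now only has to be non-zero rather than exactly 1, so future field values are accepted. A hedged C equivalent of the new check:

    /* C sketch of the ID_PFR1[19:16] generic-timer presence test. */
    u32 id_pfr1;

    asm("mrc p15, 0, %0, c0, c1, 1" : "=r" (id_pfr1));
    if ((id_pfr1 >> 16) & 0xf)          /* non-zero: timer implemented */
            enable_pl1_timer_access();  /* hypothetical helper */

The GICv3 check in the next hunk (ID_PFR1 bits [31:28]) receives the same cmp #1 to teq #0 relaxation.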
@@ -179,8 +165,8 @@ ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE
@ Check whether GICv3 system registers are available
mrc p15, 0, r7, c0, c1, 1 @ ID_PFR1
ubfx r7, r7, #28, #4
- cmp r7, #1
- bne 2f
+ teq r7, #0
+ beq 2f
@ Enable system register accesses
mrc p15, 4, r7, c12, c9, 5 @ ICC_HSRE
@@ -202,19 +188,19 @@ ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE
ENDPROC(__hyp_stub_install_secondary)
__hyp_stub_do_trap:
+#ifdef ZIMAGE
teq r0, #HVC_SET_VECTORS
bne 1f
+ /* Only the ZIMAGE stubs can change the HYP vectors */
mcr p15, 4, r1, c12, c0, 0 @ set HVBAR
b __hyp_stub_exit
+#endif
1: teq r0, #HVC_SOFT_RESTART
- bne 1f
+ bne 2f
bx r1
-1: teq r0, #HVC_RESET_VECTORS
- beq __hyp_stub_exit
-
- ldr r0, =HVC_STUB_ERR
+2: ldr r0, =HVC_STUB_ERR
__ERET
__hyp_stub_exit:
@@ -223,26 +209,9 @@ __hyp_stub_exit:
ENDPROC(__hyp_stub_do_trap)
/*
- * __hyp_set_vectors: Call this after boot to set the initial hypervisor
- * vectors as part of hypervisor installation. On an SMP system, this should
- * be called on each CPU.
- *
- * r0 must be the physical address of the new vector table (which must lie in
- * the bottom 4GB of physical address space.
- *
- * r0 must be 32-byte aligned.
- *
- * Before calling this, you must check that the stub hypervisor is installed
- * everywhere, by waiting for any secondary CPUs to be brought up and then
- * checking that BOOT_CPU_MODE_HAVE_HYP(__boot_cpu_mode) is true.
- *
- * If not, there is a pre-existing hypervisor, some CPUs failed to boot, or
- * something else went wrong... in such cases, trying to install a new
- * hypervisor is unlikely to work as desired.
- *
- * When you call into your shiny new hypervisor, sp_hyp will contain junk,
- * so you will need to set that to something sensible at the new hypervisor's
- * initialisation entry point.
+ * __hyp_set_vectors is only used when ZIMAGE must bounce between HYP
+ * and SVC. For the kernel itself, the vectors are set once and for
+ * all by the stubs.
*/
ENTRY(__hyp_set_vectors)
mov r1, r0
@@ -258,18 +227,6 @@ ENTRY(__hyp_soft_restart)
ret lr
ENDPROC(__hyp_soft_restart)
-ENTRY(__hyp_reset_vectors)
- mov r0, #HVC_RESET_VECTORS
- __HVC(0)
- ret lr
-ENDPROC(__hyp_reset_vectors)
-
-#ifndef ZIMAGE
-.align 2
-.L__boot_cpu_mode_offset:
- .long __boot_cpu_mode - .
-#endif
-
.align 5
ENTRY(__hyp_stub_vectors)
__hyp_stub_reset: W(b) .
diff --git a/arch/arm/kernel/insn.c b/arch/arm/kernel/insn.c
index b760340b7014..db0acbb7d7a0 100644
--- a/arch/arm/kernel/insn.c
+++ b/arch/arm/kernel/insn.c
@@ -1,9 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/bug.h>
#include <linux/kernel.h>
#include <asm/opcodes.h>
-static unsigned long
-__arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link)
+static unsigned long __arm_gen_branch_thumb2(unsigned long pc,
+ unsigned long addr, bool link,
+ bool warn)
{
unsigned long s, j1, j2, i1, i2, imm10, imm11;
unsigned long first, second;
@@ -11,7 +13,7 @@ __arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link)
offset = (long)addr - (long)(pc + 4);
if (offset < -16777216 || offset > 16777214) {
- WARN_ON_ONCE(1);
+ WARN_ON_ONCE(warn);
return 0;
}
@@ -32,8 +34,8 @@ __arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link)
return __opcode_thumb32_compose(first, second);
}
-static unsigned long
-__arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link)
+static unsigned long __arm_gen_branch_arm(unsigned long pc, unsigned long addr,
+ bool link, bool warn)
{
unsigned long opcode = 0xea000000;
long offset;
@@ -43,7 +45,7 @@ __arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link)
offset = (long)addr - (long)(pc + 8);
if (unlikely(offset < -33554432 || offset > 33554428)) {
- WARN_ON_ONCE(1);
+ WARN_ON_ONCE(warn);
return 0;
}
@@ -53,10 +55,10 @@ __arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link)
}
unsigned long
-__arm_gen_branch(unsigned long pc, unsigned long addr, bool link)
+__arm_gen_branch(unsigned long pc, unsigned long addr, bool link, bool warn)
{
if (IS_ENABLED(CONFIG_THUMB2_KERNEL))
- return __arm_gen_branch_thumb2(pc, addr, link);
+ return __arm_gen_branch_thumb2(pc, addr, link, warn);
else
- return __arm_gen_branch_arm(pc, addr, link);
+ return __arm_gen_branch_arm(pc, addr, link, warn);
}
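The new warn flag lets a caller probe whether a direct branch can encode the distance without tripping WARN_ON_ONCE; a zero return then simply means "out of range". A hedged sketch of the intended usage pattern (ftrace with module PLTs is the presumed consumer):

    unsigned long insn;

    insn = __arm_gen_branch(pc, addr, true, false);  /* silent probe */
    if (!insn) {
            /* too far for a 24/25-bit branch: go via a PLT (assumed path) */
            addr = get_module_plt(mod, pc, addr);
            insn = __arm_gen_branch(pc, addr, true, true);
    }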
diff --git a/arch/arm/kernel/io.c b/arch/arm/kernel/io.c
index eedefe050022..60b621295d6c 100644
--- a/arch/arm/kernel/io.c
+++ b/arch/arm/kernel/io.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/types.h>
#include <linux/io.h>
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index ece04a457486..e1993e28a9ec 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/kernel/irq.c
*
@@ -8,10 +9,6 @@
* Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and
* Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* This file contains the code used by various IRQ handling routines:
* asking for different IRQ's should be done through these routines
* instead of just grabbing them. Thus setups with different IRQ numbers
@@ -21,7 +18,6 @@
* IRQ's are in fact implemented a bit like signal handlers for the kernel.
* Naturally it's not a 1:1 relation, but there are similarities.
*/
-#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
@@ -31,23 +27,64 @@
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/seq_file.h>
-#include <linux/ratelimit.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>
#include <linux/export.h>
+#include <linux/vmalloc.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/cache-uniphier.h>
#include <asm/outercache.h>
+#include <asm/softirq_stack.h>
#include <asm/exception.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
+#include "reboot.h"
+
unsigned long irq_err_count;
+#ifdef CONFIG_IRQSTACKS
+
+asmlinkage DEFINE_PER_CPU_READ_MOSTLY(u8 *, irq_stack_ptr);
+
+static void __init init_irq_stacks(void)
+{
+ u8 *stack;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ if (!IS_ENABLED(CONFIG_VMAP_STACK))
+ stack = (u8 *)__get_free_pages(GFP_KERNEL,
+ THREAD_SIZE_ORDER);
+ else
+ stack = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN,
+ THREADINFO_GFP, NUMA_NO_NODE,
+ __builtin_return_address(0));
+
+ if (WARN_ON(!stack))
+ break;
+ per_cpu(irq_stack_ptr, cpu) = &stack[THREAD_SIZE];
+ }
+}
+
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
+static void ____do_softirq(void *arg)
+{
+ __do_softirq();
+}
+
+void do_softirq_own_stack(void)
+{
+ call_with_stack(____do_softirq, NULL,
+ __this_cpu_read(irq_stack_ptr));
+}
+#endif
+#endif
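call_with_stack() is the arm helper that switches the stack pointer to the supplied address, invokes the callback, and switches back; its prototype, quoted here for reference, is:

    void call_with_stack(void (*fn)(void *), void *arg, void *sp);

Note that irq_stack_ptr is set to &stack[THREAD_SIZE], one past the end of the allocation, because ARM uses a full-descending stack that starts at the high address.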
+
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_FIQ
@@ -68,22 +105,31 @@ int arch_show_interrupts(struct seq_file *p, int prec)
*/
void handle_IRQ(unsigned int irq, struct pt_regs *regs)
{
- __handle_domain_irq(NULL, irq, false, regs);
-}
+ struct irq_desc *desc;
-/*
- * asm_do_IRQ is the interface to be used from assembly code.
- */
-asmlinkage void __exception_irq_entry
-asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
-{
- handle_IRQ(irq, regs);
+ /*
+ * Some hardware gives randomly wrong interrupts. Rather
+ * than crashing, do something sensible.
+ */
+ if (unlikely(!irq || irq >= irq_get_nr_irqs()))
+ desc = NULL;
+ else
+ desc = irq_to_desc(irq);
+
+ if (likely(desc))
+ handle_irq_desc(desc);
+ else
+ ack_bad_irq(irq);
}
void __init init_IRQ(void)
{
int ret;
+#ifdef CONFIG_IRQSTACKS
+ init_irq_stacks();
+#endif
+
if (IS_ENABLED(CONFIG_OF) && !machine_desc->init_irq)
irqchip_init();
else
@@ -102,81 +148,9 @@ void __init init_IRQ(void)
uniphier_cache_init();
}
-#ifdef CONFIG_MULTI_IRQ_HANDLER
-void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
-{
- if (handle_arch_irq)
- return;
-
- handle_arch_irq = handle_irq;
-}
-#endif
-
#ifdef CONFIG_SPARSE_IRQ
int __init arch_probe_nr_irqs(void)
{
- nr_irqs = machine_desc->nr_irqs ? machine_desc->nr_irqs : NR_IRQS;
- return nr_irqs;
+ return irq_set_nr_irqs(machine_desc->nr_irqs ? : NR_IRQS);
}
#endif
-
-#ifdef CONFIG_HOTPLUG_CPU
-static bool migrate_one_irq(struct irq_desc *desc)
-{
- struct irq_data *d = irq_desc_get_irq_data(desc);
- const struct cpumask *affinity = irq_data_get_affinity_mask(d);
- struct irq_chip *c;
- bool ret = false;
-
- /*
- * If this is a per-CPU interrupt, or the affinity does not
- * include this CPU, then we have nothing to do.
- */
- if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
- return false;
-
- if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
- affinity = cpu_online_mask;
- ret = true;
- }
-
- c = irq_data_get_irq_chip(d);
- if (!c->irq_set_affinity)
- pr_debug("IRQ%u: unable to set affinity\n", d->irq);
- else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
- cpumask_copy(irq_data_get_affinity_mask(d), affinity);
-
- return ret;
-}
-
-/*
- * The current CPU has been marked offline. Migrate IRQs off this CPU.
- * If the affinity settings do not allow other CPUs, force them onto any
- * available CPU.
- *
- * Note: we must iterate over all IRQs, whether they have an attached
- * action structure or not, as we need to get chained interrupts too.
- */
-void migrate_irqs(void)
-{
- unsigned int i;
- struct irq_desc *desc;
- unsigned long flags;
-
- local_irq_save(flags);
-
- for_each_irq_desc(i, desc) {
- bool affinity_broken;
-
- raw_spin_lock(&desc->lock);
- affinity_broken = migrate_one_irq(desc);
- raw_spin_unlock(&desc->lock);
-
- if (affinity_broken)
- pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
- i, smp_processor_id());
- }
-
- local_irq_restore(flags);
-}
-#endif /* CONFIG_HOTPLUG_CPU */
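The removed migrate_one_irq()/migrate_irqs() pair is not lost functionality: CPU-hotplug interrupt migration is assumed to be handled by the generic equivalent in kernel/irq (irq_migrate_all_off_this_cpu(), selected via GENERIC_IRQ_MIGRATION), so the arch-private copy can go.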
diff --git a/arch/arm/kernel/isa.c b/arch/arm/kernel/isa.c
index 9d1cf7156895..db8be609fab2 100644
--- a/arch/arm/kernel/isa.c
+++ b/arch/arm/kernel/isa.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* linux/arch/arm/kernel/isa.c
*
* Copyright (C) 1999 Phil Blundell
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* ISA shared memory and I/O port support, and is required to support
* iopl, inb, outb and friends in userspace via glibc emulation.
*/
@@ -20,7 +16,7 @@
static unsigned int isa_membase, isa_portbase, isa_portshift;
-static struct ctl_table ctl_isa_vars[4] = {
+static const struct ctl_table ctl_isa_vars[] = {
{
.procname = "membase",
.data = &isa_membase,
@@ -39,32 +35,16 @@ static struct ctl_table ctl_isa_vars[4] = {
.maxlen = sizeof(isa_portshift),
.mode = 0444,
.proc_handler = proc_dointvec,
- }, {}
+ },
};
static struct ctl_table_header *isa_sysctl_header;
-static struct ctl_table ctl_isa[2] = {
- {
- .procname = "isa",
- .mode = 0555,
- .child = ctl_isa_vars,
- }, {}
-};
-
-static struct ctl_table ctl_bus[2] = {
- {
- .procname = "bus",
- .mode = 0555,
- .child = ctl_isa,
- }, {}
-};
-
void __init
register_isa_ports(unsigned int membase, unsigned int portbase, unsigned int portshift)
{
isa_membase = membase;
isa_portbase = portbase;
isa_portshift = portshift;
- isa_sysctl_header = register_sysctl_table(ctl_bus);
+ isa_sysctl_header = register_sysctl("bus/isa", ctl_isa_vars);
}
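register_sysctl("bus/isa", ...) builds the intermediate "bus" and "isa" directories from the path string, which is why the two hand-rolled single-entry directory tables can be deleted outright; the remaining table also becomes const and loses its empty sentinel entry, per the newer sysctl conventions. The visible interface is unchanged: the files still appear as /proc/sys/bus/isa/membase, portbase and portshift.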
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
index 49fadbda8c63..4a335d3c5969 100644
--- a/arch/arm/kernel/iwmmxt.S
+++ b/arch/arm/kernel/iwmmxt.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/arch/arm/kernel/iwmmxt.S
*
@@ -8,10 +9,6 @@
*
* Full lazy switching support, optimizations and more, by Nicolas Pitre
* Copyright (c) 2003-2004, MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/linkage.h>
@@ -19,18 +16,7 @@
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
-
-#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
-#define PJ4(code...) code
-#define XSC(code...)
-#elif defined(CONFIG_CPU_MOHAWK) || \
- defined(CONFIG_CPU_XSC3) || \
- defined(CONFIG_CPU_XSCALE)
-#define PJ4(code...)
-#define XSC(code...) code
-#else
-#error "Unsupported iWMMXt architecture"
-#endif
+#include "iwmmxt.h"
#define MMX_WR0 (0x00)
#define MMX_WR1 (0x08)
@@ -60,9 +46,19 @@
.text
.arm
+ENTRY(iwmmxt_undef_handler)
+ push {r9, r10, lr}
+ get_thread_info r10
+ mov r9, pc
+ b iwmmxt_task_enable
+ mov r0, #0
+ pop {r9, r10, pc}
+ENDPROC(iwmmxt_undef_handler)
+
/*
* Lazy switching of Concan coprocessor context
*
+ * r0 = struct pt_regs pointer
* r10 = struct thread_info pointer
* r9 = ret_from_exception
* lr = undefined instr exit
@@ -73,25 +69,21 @@
ENTRY(iwmmxt_task_enable)
inc_preempt_count r10, r3
- XSC(mrc p15, 0, r2, c15, c1, 0)
- PJ4(mrc p15, 0, r2, c1, c0, 2)
+ mrc p15, 0, r2, c15, c1, 0
@ CP0 and CP1 accessible?
- XSC(tst r2, #0x3)
- PJ4(tst r2, #0xf)
+ tst r2, #0x3
bne 4f @ if so no business here
@ enable access to CP0 and CP1
- XSC(orr r2, r2, #0x3)
- XSC(mcr p15, 0, r2, c15, c1, 0)
- PJ4(orr r2, r2, #0xf)
- PJ4(mcr p15, 0, r2, c1, c0, 2)
+ orr r2, r2, #0x3
+ mcr p15, 0, r2, c15, c1, 0
ldr r3, =concan_owner
- add r0, r10, #TI_IWMMXT_STATE @ get task Concan save area
- ldr r2, [sp, #60] @ current task pc value
+ ldr r2, [r0, #S_PC] @ current task pc value
ldr r1, [r3] @ get current Concan owner
- str r0, [r3] @ this task now owns Concan regs
sub r2, r2, #4 @ adjust pc back
- str r2, [sp, #60]
+ str r2, [r0, #S_PC]
+ add r0, r10, #TI_IWMMXT_STATE @ get task Concan save area
+ str r0, [r3] @ this task now owns Concan regs
mrc p15, 0, r2, c2, c0, 0
mov r2, r2 @ cpwait
@@ -116,33 +108,33 @@ concan_save:
concan_dump:
- wstrw wCSSF, [r1, #MMX_WCSSF]
- wstrw wCASF, [r1, #MMX_WCASF]
- wstrw wCGR0, [r1, #MMX_WCGR0]
- wstrw wCGR1, [r1, #MMX_WCGR1]
- wstrw wCGR2, [r1, #MMX_WCGR2]
- wstrw wCGR3, [r1, #MMX_WCGR3]
+ wstrw wCSSF, r1, MMX_WCSSF
+ wstrw wCASF, r1, MMX_WCASF
+ wstrw wCGR0, r1, MMX_WCGR0
+ wstrw wCGR1, r1, MMX_WCGR1
+ wstrw wCGR2, r1, MMX_WCGR2
+ wstrw wCGR3, r1, MMX_WCGR3
1: @ MUP? wRn
tst r2, #0x2
beq 2f
- wstrd wR0, [r1, #MMX_WR0]
- wstrd wR1, [r1, #MMX_WR1]
- wstrd wR2, [r1, #MMX_WR2]
- wstrd wR3, [r1, #MMX_WR3]
- wstrd wR4, [r1, #MMX_WR4]
- wstrd wR5, [r1, #MMX_WR5]
- wstrd wR6, [r1, #MMX_WR6]
- wstrd wR7, [r1, #MMX_WR7]
- wstrd wR8, [r1, #MMX_WR8]
- wstrd wR9, [r1, #MMX_WR9]
- wstrd wR10, [r1, #MMX_WR10]
- wstrd wR11, [r1, #MMX_WR11]
- wstrd wR12, [r1, #MMX_WR12]
- wstrd wR13, [r1, #MMX_WR13]
- wstrd wR14, [r1, #MMX_WR14]
- wstrd wR15, [r1, #MMX_WR15]
+ wstrd wR0, r1, MMX_WR0
+ wstrd wR1, r1, MMX_WR1
+ wstrd wR2, r1, MMX_WR2
+ wstrd wR3, r1, MMX_WR3
+ wstrd wR4, r1, MMX_WR4
+ wstrd wR5, r1, MMX_WR5
+ wstrd wR6, r1, MMX_WR6
+ wstrd wR7, r1, MMX_WR7
+ wstrd wR8, r1, MMX_WR8
+ wstrd wR9, r1, MMX_WR9
+ wstrd wR10, r1, MMX_WR10
+ wstrd wR11, r1, MMX_WR11
+ wstrd wR12, r1, MMX_WR12
+ wstrd wR13, r1, MMX_WR13
+ wstrd wR14, r1, MMX_WR14
+ wstrd wR15, r1, MMX_WR15
2: teq r0, #0 @ anything to load?
reteq lr @ if not, return
@@ -150,30 +142,30 @@ concan_dump:
concan_load:
@ Load wRn
- wldrd wR0, [r0, #MMX_WR0]
- wldrd wR1, [r0, #MMX_WR1]
- wldrd wR2, [r0, #MMX_WR2]
- wldrd wR3, [r0, #MMX_WR3]
- wldrd wR4, [r0, #MMX_WR4]
- wldrd wR5, [r0, #MMX_WR5]
- wldrd wR6, [r0, #MMX_WR6]
- wldrd wR7, [r0, #MMX_WR7]
- wldrd wR8, [r0, #MMX_WR8]
- wldrd wR9, [r0, #MMX_WR9]
- wldrd wR10, [r0, #MMX_WR10]
- wldrd wR11, [r0, #MMX_WR11]
- wldrd wR12, [r0, #MMX_WR12]
- wldrd wR13, [r0, #MMX_WR13]
- wldrd wR14, [r0, #MMX_WR14]
- wldrd wR15, [r0, #MMX_WR15]
+ wldrd wR0, r0, MMX_WR0
+ wldrd wR1, r0, MMX_WR1
+ wldrd wR2, r0, MMX_WR2
+ wldrd wR3, r0, MMX_WR3
+ wldrd wR4, r0, MMX_WR4
+ wldrd wR5, r0, MMX_WR5
+ wldrd wR6, r0, MMX_WR6
+ wldrd wR7, r0, MMX_WR7
+ wldrd wR8, r0, MMX_WR8
+ wldrd wR9, r0, MMX_WR9
+ wldrd wR10, r0, MMX_WR10
+ wldrd wR11, r0, MMX_WR11
+ wldrd wR12, r0, MMX_WR12
+ wldrd wR13, r0, MMX_WR13
+ wldrd wR14, r0, MMX_WR14
+ wldrd wR15, r0, MMX_WR15
@ Load wCx
- wldrw wCSSF, [r0, #MMX_WCSSF]
- wldrw wCASF, [r0, #MMX_WCASF]
- wldrw wCGR0, [r0, #MMX_WCGR0]
- wldrw wCGR1, [r0, #MMX_WCGR1]
- wldrw wCGR2, [r0, #MMX_WCGR2]
- wldrw wCGR3, [r0, #MMX_WCGR3]
+ wldrw wCSSF, r0, MMX_WCSSF
+ wldrw wCASF, r0, MMX_WCASF
+ wldrw wCGR0, r0, MMX_WCGR0
+ wldrw wCGR1, r0, MMX_WCGR1
+ wldrw wCGR2, r0, MMX_WCGR2
+ wldrw wCGR3, r0, MMX_WCGR3
@ clear CUP/MUP (only if r1 != 0)
teq r1, #0
@@ -210,12 +202,9 @@ ENTRY(iwmmxt_task_disable)
bne 1f @ no: quit
@ enable access to CP0 and CP1
- XSC(mrc p15, 0, r4, c15, c1, 0)
- XSC(orr r4, r4, #0x3)
- XSC(mcr p15, 0, r4, c15, c1, 0)
- PJ4(mrc p15, 0, r4, c1, c0, 2)
- PJ4(orr r4, r4, #0xf)
- PJ4(mcr p15, 0, r4, c1, c0, 2)
+ mrc p15, 0, r4, c15, c1, 0
+ orr r4, r4, #0x3
+ mcr p15, 0, r4, c15, c1, 0
mov r0, #0 @ nothing to load
str r0, [r3] @ no more current owner
@@ -224,10 +213,8 @@ ENTRY(iwmmxt_task_disable)
bl concan_save
@ disable access to CP0 and CP1
- XSC(bic r4, r4, #0x3)
- XSC(mcr p15, 0, r4, c15, c1, 0)
- PJ4(bic r4, r4, #0xf)
- PJ4(mcr p15, 0, r4, c1, c0, 2)
+ bic r4, r4, #0x3
+ mcr p15, 0, r4, c15, c1, 0
mrc p15, 0, r2, c2, c0, 0
mov r2, r2 @ cpwait
@@ -322,11 +309,9 @@ ENDPROC(iwmmxt_task_restore)
*/
ENTRY(iwmmxt_task_switch)
- XSC(mrc p15, 0, r1, c15, c1, 0)
- PJ4(mrc p15, 0, r1, c1, c0, 2)
+ mrc p15, 0, r1, c15, c1, 0
@ CP0 and CP1 accessible?
- XSC(tst r1, #0x3)
- PJ4(tst r1, #0xf)
+ tst r1, #0x3
bne 1f @ yes: block them for next task
ldr r2, =concan_owner
@@ -336,10 +321,8 @@ ENTRY(iwmmxt_task_switch)
retne lr @ no: leave Concan disabled
1: @ flip Concan access
- XSC(eor r1, r1, #0x3)
- XSC(mcr p15, 0, r1, c15, c1, 0)
- PJ4(eor r1, r1, #0xf)
- PJ4(mcr p15, 0, r1, c1, c0, 2)
+ eor r1, r1, #0x3
+ mcr p15, 0, r1, c15, c1, 0
mrc p15, 0, r1, c2, c0, 0
sub pc, lr, r1, lsr #32 @ cpwait and return
@@ -367,6 +350,7 @@ ENTRY(iwmmxt_task_release)
ENDPROC(iwmmxt_task_release)
.data
+ .align 2
concan_owner:
.word 0
diff --git a/arch/arm/kernel/iwmmxt.h b/arch/arm/kernel/iwmmxt.h
new file mode 100644
index 000000000000..fb627286f5bb
--- /dev/null
+++ b/arch/arm/kernel/iwmmxt.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __IWMMXT_H__
+#define __IWMMXT_H__
+
+.irp b, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
+.set .LwR\b, \b
+.set .Lr\b, \b
+.endr
+
+.set .LwCSSF, 0x2
+.set .LwCASF, 0x3
+.set .LwCGR0, 0x8
+.set .LwCGR1, 0x9
+.set .LwCGR2, 0xa
+.set .LwCGR3, 0xb
+
+.macro wldrd, reg:req, base:req, offset:req
+.inst 0xedd00100 | (.L\reg << 12) | (.L\base << 16) | (\offset >> 2)
+.endm
+
+.macro wldrw, reg:req, base:req, offset:req
+.inst 0xfd900100 | (.L\reg << 12) | (.L\base << 16) | (\offset >> 2)
+.endm
+
+.macro wstrd, reg:req, base:req, offset:req
+.inst 0xedc00100 | (.L\reg << 12) | (.L\base << 16) | (\offset >> 2)
+.endm
+
+.macro wstrw, reg:req, base:req, offset:req
+.inst 0xfd800100 | (.L\reg << 12) | (.L\base << 16) | (\offset >> 2)
+.endm
+
+#ifdef __clang__
+
+#define wCon c1
+
+.macro tmrc, dest:req, control:req
+mrc p1, 0, \dest, \control, c0, 0
+.endm
+
+.macro tmcr, control:req, src:req
+mcr p1, 0, \src, \control, c0, 0
+.endm
+#endif
+
+#endif
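The .set symbols give each register mnemonic a numeric value, so the macros can construct the coprocessor opcodes with plain arithmetic and emit them via .inst, removing any dependence on assembler support for the iWMMXt mnemonics (the __clang__ block above suggests Clang's integrated assembler is the main concern). As a worked example, "wldrd wR7, r1, MMX_WR7" with MMX_WR7 == 0x38 expands to .inst 0xedd00100 | (7 << 12) | (1 << 16) | (0x38 >> 2), i.e. the word 0xedd1700e.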
diff --git a/arch/arm/kernel/jump_label.c b/arch/arm/kernel/jump_label.c
index 845a5dd9c42b..a06a92d0f550 100644
--- a/arch/arm/kernel/jump_label.c
+++ b/arch/arm/kernel/jump_label.c
@@ -1,10 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/jump_label.h>
-#include <asm/patch.h>
+#include <asm/text-patching.h>
#include <asm/insn.h>
-#ifdef HAVE_JUMP_LABEL
-
static void __arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type,
bool is_static)
@@ -28,11 +27,3 @@ void arch_jump_label_transform(struct jump_entry *entry,
{
__arch_jump_label_transform(entry, type, false);
}
-
-void arch_jump_label_transform_static(struct jump_entry *entry,
- enum jump_label_type type)
-{
- __arch_jump_label_transform(entry, type, true);
-}
-
-#endif
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index 1bb4c40a3135..ab76c55fd610 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* arch/arm/kernel/kgdb.c
*
@@ -14,7 +15,7 @@
#include <linux/kgdb.h>
#include <linux/uaccess.h>
-#include <asm/patch.h>
+#include <asm/text-patching.h>
#include <asm/traps.h>
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
@@ -153,33 +154,37 @@ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr)
return 0;
}
-static struct undef_hook kgdb_brkpt_hook = {
+static struct undef_hook kgdb_brkpt_arm_hook = {
.instr_mask = 0xffffffff,
.instr_val = KGDB_BREAKINST,
- .cpsr_mask = MODE_MASK,
+ .cpsr_mask = PSR_T_BIT | MODE_MASK,
.cpsr_val = SVC_MODE,
.fn = kgdb_brk_fn
};
-static struct undef_hook kgdb_compiled_brkpt_hook = {
+static struct undef_hook kgdb_brkpt_thumb_hook = {
+ .instr_mask = 0xffff,
+ .instr_val = KGDB_BREAKINST & 0xffff,
+ .cpsr_mask = PSR_T_BIT | MODE_MASK,
+ .cpsr_val = PSR_T_BIT | SVC_MODE,
+ .fn = kgdb_brk_fn
+};
+
+static struct undef_hook kgdb_compiled_brkpt_arm_hook = {
.instr_mask = 0xffffffff,
.instr_val = KGDB_COMPILED_BREAK,
- .cpsr_mask = MODE_MASK,
+ .cpsr_mask = PSR_T_BIT | MODE_MASK,
.cpsr_val = SVC_MODE,
.fn = kgdb_compiled_brk_fn
};
-static void kgdb_call_nmi_hook(void *ignored)
-{
- kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
-}
-
-void kgdb_roundup_cpus(unsigned long flags)
-{
- local_irq_enable();
- smp_call_function(kgdb_call_nmi_hook, NULL, 0);
- local_irq_disable();
-}
+static struct undef_hook kgdb_compiled_brkpt_thumb_hook = {
+ .instr_mask = 0xffff,
+ .instr_val = KGDB_COMPILED_BREAK & 0xffff,
+ .cpsr_mask = PSR_T_BIT | MODE_MASK,
+ .cpsr_val = PSR_T_BIT | SVC_MODE,
+ .fn = kgdb_compiled_brk_fn
+};
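Splitting the hooks per instruction set closes a matching hole: the Thumb variants match only the low halfword of the break instruction (KGDB_BREAKINST & 0xffff) and require PSR_T_BIT set in the CPSR, while the ARM variants now require it clear, so a breakpoint hit in one state can no longer be claimed by the other state's hook.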
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
@@ -221,8 +226,10 @@ int kgdb_arch_init(void)
if (ret != 0)
return ret;
- register_undef_hook(&kgdb_brkpt_hook);
- register_undef_hook(&kgdb_compiled_brkpt_hook);
+ register_undef_hook(&kgdb_brkpt_arm_hook);
+ register_undef_hook(&kgdb_brkpt_thumb_hook);
+ register_undef_hook(&kgdb_compiled_brkpt_arm_hook);
+ register_undef_hook(&kgdb_compiled_brkpt_thumb_hook);
return 0;
}
@@ -235,8 +242,10 @@ int kgdb_arch_init(void)
*/
void kgdb_arch_exit(void)
{
- unregister_undef_hook(&kgdb_brkpt_hook);
- unregister_undef_hook(&kgdb_compiled_brkpt_hook);
+ unregister_undef_hook(&kgdb_brkpt_arm_hook);
+ unregister_undef_hook(&kgdb_brkpt_thumb_hook);
+ unregister_undef_hook(&kgdb_compiled_brkpt_arm_hook);
+ unregister_undef_hook(&kgdb_compiled_brkpt_thumb_hook);
unregister_die_notifier(&kgdb_notifier);
}
@@ -247,7 +256,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
/* patch_text() only supports int-sized breakpoints */
BUILD_BUG_ON(sizeof(int) != BREAK_INSTR_SIZE);
- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
+ err = copy_from_kernel_nofault(bpt->saved_instr, (char *)bpt->bpt_addr,
BREAK_INSTR_SIZE);
if (err)
return err;
@@ -273,7 +282,7 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
* and we handle the normal undef case within the do_undefinstr
* handler.
*/
-struct kgdb_arch arch_kgdb_ops = {
+const struct kgdb_arch arch_kgdb_ops = {
#ifndef __ARMEB__
.gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
#else /* ! __ARMEB__ */
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index fe1419eeb932..dd430477e7c1 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* machine_kexec.c - handle transition of Linux booting another kernel
*/
@@ -9,11 +10,10 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/memblock.h>
-#include <asm/pgtable.h>
#include <linux/of_fdt.h>
-#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
+#include <asm/kexec-internal.h>
#include <asm/fncpy.h>
#include <asm/mach-types.h>
#include <asm/smp_plat.h>
@@ -23,11 +23,6 @@
extern void relocate_new_kernel(void);
extern const unsigned int relocate_new_kernel_size;
-extern unsigned long kexec_start_address;
-extern unsigned long kexec_indirection_page;
-extern unsigned long kexec_mach_type;
-extern unsigned long kexec_boot_atags;
-
static atomic_t waiting_for_crash_ipi;
/*
@@ -78,11 +73,13 @@ void machine_kexec_cleanup(struct kimage *image)
{
}
-void machine_crash_nonpanic_core(void *unused)
+static void machine_crash_nonpanic_core(void *unused)
{
struct pt_regs regs;
- crash_setup_regs(&regs, NULL);
+ local_fiq_disable();
+
+ crash_setup_regs(&regs, get_irq_regs());
printk(KERN_DEBUG "CPU %u will stop doing anything useful since another CPU has crashed\n",
smp_processor_id());
crash_save_cpu(&regs, smp_processor_id());
@@ -90,41 +87,35 @@ void machine_crash_nonpanic_core(void *unused)
set_cpu_online(smp_processor_id(), false);
atomic_dec(&waiting_for_crash_ipi);
- while (1)
+
+ while (1) {
cpu_relax();
+ wfe();
+ }
}
-static void machine_kexec_mask_interrupts(void)
+static DEFINE_PER_CPU(call_single_data_t, cpu_stop_csd) =
+ CSD_INIT(machine_crash_nonpanic_core, NULL);
+
+void crash_smp_send_stop(void)
{
- unsigned int i;
- struct irq_desc *desc;
+ static int cpus_stopped;
+ unsigned long msecs;
+ call_single_data_t *csd;
+ int cpu, this_cpu = raw_smp_processor_id();
- for_each_irq_desc(i, desc) {
- struct irq_chip *chip;
+ if (cpus_stopped)
+ return;
- chip = irq_desc_get_chip(desc);
- if (!chip)
+ atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+ for_each_online_cpu(cpu) {
+ if (cpu == this_cpu)
continue;
- if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
- chip->irq_eoi(&desc->irq_data);
-
- if (chip->irq_mask)
- chip->irq_mask(&desc->irq_data);
-
- if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
- chip->irq_disable(&desc->irq_data);
+ csd = &per_cpu(cpu_stop_csd, cpu);
+ smp_call_function_single_async(cpu, csd);
}
-}
-
-void machine_crash_shutdown(struct pt_regs *regs)
-{
- unsigned long msecs;
-
- local_irq_disable();
- atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
- smp_call_function(machine_crash_nonpanic_core, NULL, false);
msecs = 1000; /* Wait at most a second for the other cpus to stop */
while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
mdelay(1);
@@ -133,20 +124,24 @@ void machine_crash_shutdown(struct pt_regs *regs)
if (atomic_read(&waiting_for_crash_ipi) > 0)
pr_warn("Non-crashing CPUs did not react to IPI\n");
+ cpus_stopped = 1;
+}
+
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+ local_irq_disable();
+ crash_smp_send_stop();
+
crash_save_cpu(regs, smp_processor_id());
machine_kexec_mask_interrupts();
pr_info("Loading crashdump kernel...\n");
}
-/*
- * Function pointer to optional machine-specific reinitialization
- */
-void (*kexec_reinit)(void);
-
void machine_kexec(struct kimage *image)
{
unsigned long page_list, reboot_entry_phys;
+ struct kexec_relocate_data *data;
void (*reboot_entry)(void);
void *reboot_code_buffer;
@@ -162,32 +157,21 @@ void machine_kexec(struct kimage *image)
reboot_code_buffer = page_address(image->control_code_page);
- /* Prepare parameters for reboot_code_buffer*/
- set_kernel_text_rw();
- kexec_start_address = image->start;
- kexec_indirection_page = page_list;
- kexec_mach_type = machine_arch_type;
- kexec_boot_atags = image->arch.kernel_r2;
-
/* copy our kernel relocation code to the control code page */
reboot_entry = fncpy(reboot_code_buffer,
&relocate_new_kernel,
relocate_new_kernel_size);
+ data = reboot_code_buffer + relocate_new_kernel_size;
+ data->kexec_start_address = image->start;
+ data->kexec_indirection_page = page_list;
+ data->kexec_mach_type = machine_arch_type;
+ data->kexec_r2 = image->arch.kernel_r2;
+
/* get the identity mapping physical address for the reboot code */
reboot_entry_phys = virt_to_idmap(reboot_entry);
pr_info("Bye!\n");
- if (kexec_reinit)
- kexec_reinit();
-
soft_restart(reboot_entry_phys);
}
-
-void arch_crash_save_vmcoreinfo(void)
-{
-#ifdef CONFIG_ARM_LPAE
- VMCOREINFO_CONFIG(ARM_LPAE);
-#endif
-}
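The kexec parameters are no longer patched into kernel text variables (which is why set_kernel_text_rw() disappears); they are written into a kexec_relocate_data block placed directly after the relocated code inside the control page, where relocate_new_kernel can find them position-independently. A sketch of the layout this implies (the authoritative definition lives in asm/kexec-internal.h):

    struct kexec_relocate_data {
            unsigned long kexec_start_address;
            unsigned long kexec_indirection_page;
            unsigned long kexec_mach_type;
            unsigned long kexec_r2;
    };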
diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
index 3d0c2e4dda1d..354ce16d83cb 100644
--- a/arch/arm/kernel/module-plts.c
+++ b/arch/arm/kernel/module-plts.c
@@ -1,23 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/elf.h>
+#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sort.h>
+#include <linux/moduleloader.h>
#include <asm/cache.h>
#include <asm/opcodes.h>
-#define PLT_ENT_STRIDE L1_CACHE_BYTES
-#define PLT_ENT_COUNT (PLT_ENT_STRIDE / sizeof(u32))
-#define PLT_ENT_SIZE (sizeof(struct plt_entries) / PLT_ENT_COUNT)
-
#ifdef CONFIG_THUMB2_KERNEL
#define PLT_ENT_LDR __opcode_to_mem_thumb32(0xf8dff000 | \
(PLT_ENT_STRIDE - 4))
@@ -26,24 +21,47 @@
(PLT_ENT_STRIDE - 8))
#endif
-struct plt_entries {
- u32 ldr[PLT_ENT_COUNT];
- u32 lit[PLT_ENT_COUNT];
+static const u32 fixed_plts[] = {
+#ifdef CONFIG_DYNAMIC_FTRACE
+ FTRACE_ADDR,
+ MCOUNT_ADDR,
+#endif
};
-static bool in_init(const struct module *mod, unsigned long loc)
+static void prealloc_fixed(struct mod_plt_sec *pltsec, struct plt_entries *plt)
{
- return loc - (u32)mod->init_layout.base < mod->init_layout.size;
+ int i;
+
+ if (!ARRAY_SIZE(fixed_plts) || pltsec->plt_count)
+ return;
+ pltsec->plt_count = ARRAY_SIZE(fixed_plts);
+
+ for (i = 0; i < ARRAY_SIZE(plt->ldr); ++i)
+ plt->ldr[i] = PLT_ENT_LDR;
+
+ BUILD_BUG_ON(sizeof(fixed_plts) > sizeof(plt->lit));
+ memcpy(plt->lit, fixed_plts, sizeof(fixed_plts));
}
u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
{
- struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
- &mod->arch.init;
+ struct mod_plt_sec *pltsec = !within_module_init(loc, mod) ?
+ &mod->arch.core : &mod->arch.init;
+ struct plt_entries *plt;
+ int idx;
- struct plt_entries *plt = (struct plt_entries *)pltsec->plt->sh_addr;
- int idx = 0;
+ /* cache the address, ELF header is available only during module load */
+ if (!pltsec->plt_ent)
+ pltsec->plt_ent = (struct plt_entries *)pltsec->plt->sh_addr;
+ plt = pltsec->plt_ent;
+ prealloc_fixed(pltsec, plt);
+
+ for (idx = 0; idx < ARRAY_SIZE(fixed_plts); ++idx)
+ if (plt->lit[idx] == val)
+ return (u32)&plt->ldr[idx];
+
+ idx = 0;
/*
* Look for an existing entry pointing to 'val'. Given that the
* relocations are sorted, this will be the last entry we allocated.
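Two things are going on in get_module_plt(): the PLT base address is cached in plt_ent because the ELF section headers are only available while the module is being loaded, yet in_module_plt() (added at the bottom of this file) needs the address afterwards; and prealloc_fixed() seeds the first slots with FTRACE_ADDR and MCOUNT_ADDR so that every module can always reach the ftrace entry points, with module_frob_arch_sections() below reserving the matching space by starting both PLT counts at ARRAY_SIZE(fixed_plts).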
@@ -191,8 +209,8 @@ static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
char *secstrings, struct module *mod)
{
- unsigned long core_plts = 0;
- unsigned long init_plts = 0;
+ unsigned long core_plts = ARRAY_SIZE(fixed_plts);
+ unsigned long init_plts = ARRAY_SIZE(fixed_plts);
Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
Elf32_Sym *syms = NULL;
@@ -233,7 +251,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
/* sort by type and symbol index */
sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL);
- if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
+ if (!module_init_layout_section(secstrings + dstsec->sh_name))
core_plts += count_plts(syms, dstsec->sh_addr, rels,
numrels, s->sh_info);
else
@@ -247,6 +265,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
mod->arch.core.plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
sizeof(struct plt_entries));
mod->arch.core.plt_count = 0;
+ mod->arch.core.plt_ent = NULL;
mod->arch.init.plt->sh_type = SHT_NOBITS;
mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
@@ -254,8 +273,21 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
mod->arch.init.plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
sizeof(struct plt_entries));
mod->arch.init.plt_count = 0;
+ mod->arch.init.plt_ent = NULL;
pr_debug("%s: plt=%x, init.plt=%x\n", __func__,
mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size);
return 0;
}
+
+bool in_module_plt(unsigned long loc)
+{
+ struct module *mod;
+ bool ret;
+
+ guard(rcu)();
+ mod = __module_text_address(loc);
+ ret = mod && (loc - (u32)mod->arch.core.plt_ent < mod->arch.core.plt_count * PLT_ENT_SIZE ||
+ loc - (u32)mod->arch.init.plt_ent < mod->arch.init.plt_count * PLT_ENT_SIZE);
+ return ret;
+}
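guard(rcu)() is the scope-based guard from <linux/cleanup.h>: it takes rcu_read_lock() at the point of declaration and drops it automatically when the enclosing scope exits, so in_module_plt() needs no explicit unlock on its return path. A hedged equivalence sketch:

    rcu_read_lock();
    mod = __module_text_address(loc);
    ret = mod && in_plt_range(mod, loc);  /* hypothetical helper for the tests above */
    rcu_read_unlock();                    /* what the guard emits implicitly */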
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 3ff571c2c71c..55ca3fcd37e8 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/kernel/module.c
*
* Copyright (C) 2002 Russell King.
* Modified for nommu by Hyok S. Choi
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Module allocation method suggested by Andi Kleen.
*/
#include <linux/module.h>
@@ -15,46 +12,63 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elf.h>
-#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
-#include <linux/gfp.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/smp_plat.h>
#include <asm/unwind.h>
#include <asm/opcodes.h>
-#ifdef CONFIG_XIP_KERNEL
+bool module_init_section(const char *name)
+{
+ return strstarts(name, ".init") ||
+ strstarts(name, ".ARM.extab.init") ||
+ strstarts(name, ".ARM.exidx.init");
+}
+
+bool module_exit_section(const char *name)
+{
+ return strstarts(name, ".exit") ||
+ strstarts(name, ".ARM.extab.exit") ||
+ strstarts(name, ".ARM.exidx.exit");
+}
+
+#ifdef CONFIG_ARM_HAS_GROUP_RELOCS
/*
- * The XIP kernel text is mapped in the module area for modules and
- * some other stuff to work without any indirect relocations.
- * MODULES_VADDR is redefined here and not in asm/memory.h to avoid
- * recompiling the whole kernel when CONFIG_XIP_KERNEL is turned on/off.
+ * This implements the partitioning algorithm for group relocations as
+ * documented in the ARM AArch32 ELF psABI (IHI 0044).
+ *
+ * A single PC-relative symbol reference is divided in up to 3 add or subtract
+ * operations, where the final one could be incorporated into a load/store
+ * instruction with immediate offset. E.g.,
+ *
+ * ADD Rd, PC, #... or ADD Rd, PC, #...
+ * ADD Rd, Rd, #... ADD Rd, Rd, #...
+ * LDR Rd, [Rd, #...] ADD Rd, Rd, #...
+ *
+ * The latter has a guaranteed range of only 16 MiB (3x8 == 24 bits), so it is
+ * of limited use in the kernel. However, the ADD/ADD/LDR combo has a range of
+ * -/+ 256 MiB, (2x8 + 12 == 28 bits), which means it has sufficient range for
+ * any in-kernel symbol reference (unless module PLTs are being used).
+ *
+ * The main advantage of this approach over the typical pattern using a literal
+ * load is that literal loads may miss in the D-cache, and generally lead to
+ * lower cache efficiency for variables that are referenced often from many
+ * different places in the code.
*/
-#undef MODULES_VADDR
-#define MODULES_VADDR (((unsigned long)_exiprom + ~PMD_MASK) & PMD_MASK)
-#endif
-
-#ifdef CONFIG_MMU
-void *module_alloc(unsigned long size)
+static u32 get_group_rem(u32 group, u32 *offset)
{
- gfp_t gfp_mask = GFP_KERNEL;
- void *p;
-
- /* Silence the initial allocation */
- if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS))
- gfp_mask |= __GFP_NOWARN;
-
- p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- gfp_mask, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
- __builtin_return_address(0));
- if (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || p)
- return p;
- return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
- GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
- __builtin_return_address(0));
+ u32 val = *offset;
+ u32 shift;
+ do {
+ shift = val ? (31 - __fls(val)) & ~1 : 32;
+ *offset = val;
+ if (!val)
+ break;
+ val &= 0xffffff >> shift;
+ } while (group--);
+ return shift;
}
#endif
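A worked example of the partitioning, using the ADD/ADD/LDR sequence described above with a residual offset of 0x1234567: the first get_group_rem() iteration peels off the most significant 8-bit chunk at an even bit position, giving G0 == 0x1200000; the second iteration gives G1 == 0x34400; the remaining 0x167 is left for the final R_ARM_LDR_PC_G2 relocation, well within the 12-bit LDR immediate. For R_ARM_ALU_PC_G1_NC the helper returns shift == 14 with offset == 0x34567, and the caller's

    offset >>= 24 - shift;          /* 8-bit chunk:  0xd1          */
    offset |= (shift + 8) << 7;     /* imm12 = 0xbd1: 0xd1 ROR 22  */

yields exactly the rotated-immediate encoding of the G1 chunk (0xd1 rotated right by 22 == 0x34400).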
@@ -72,6 +86,9 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
unsigned long loc;
Elf32_Sym *sym;
const char *symname;
+#ifdef CONFIG_ARM_HAS_GROUP_RELOCS
+ u32 shift, group = 1;
+#endif
s32 offset;
u32 tmp;
#ifdef CONFIG_THUMB2_KERNEL
@@ -118,8 +135,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
offset = __mem_to_opcode_arm(*(u32 *)loc);
offset = (offset & 0x00ffffff) << 2;
- if (offset & 0x02000000)
- offset -= 0x04000000;
+ offset = sign_extend32(offset, 25);
offset += sym->st_value - loc;
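sign_extend32() replaces the open-coded two's-complement trick here and in the Thumb hunks below. For reference, the generic helper from <linux/bitops.h> is:

    static inline __s32 sign_extend32(__u32 value, int index)
    {
            __u8 shift = 31 - index;
            return (__s32)(value << shift) >> shift;
    }

so sign_extend32(offset, 25) treats bit 25 as the sign bit, which is exactly what the removed "if (offset & 0x02000000) offset -= 0x04000000" achieved.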
@@ -175,14 +191,24 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
*(u32 *)loc |= offset & 0x7fffffff;
break;
+ case R_ARM_REL32:
+ *(u32 *)loc += sym->st_value - loc;
+ break;
+
case R_ARM_MOVW_ABS_NC:
case R_ARM_MOVT_ABS:
+ case R_ARM_MOVW_PREL_NC:
+ case R_ARM_MOVT_PREL:
offset = tmp = __mem_to_opcode_arm(*(u32 *)loc);
offset = ((offset & 0xf0000) >> 4) | (offset & 0xfff);
- offset = (offset ^ 0x8000) - 0x8000;
+ offset = sign_extend32(offset, 15);
offset += sym->st_value;
- if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_ABS)
+ if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_PREL ||
+ ELF32_R_TYPE(rel->r_info) == R_ARM_MOVW_PREL_NC)
+ offset -= loc;
+ if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_ABS ||
+ ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_PREL)
offset >>= 16;
tmp &= 0xfff0f000;
@@ -192,6 +218,55 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
*(u32 *)loc = __opcode_to_mem_arm(tmp);
break;
+#ifdef CONFIG_ARM_HAS_GROUP_RELOCS
+ case R_ARM_ALU_PC_G0_NC:
+ group = 0;
+ fallthrough;
+ case R_ARM_ALU_PC_G1_NC:
+ tmp = __mem_to_opcode_arm(*(u32 *)loc);
+ offset = ror32(tmp & 0xff, (tmp & 0xf00) >> 7);
+ if (tmp & BIT(22))
+ offset = -offset;
+ offset += sym->st_value - loc;
+ if (offset < 0) {
+ offset = -offset;
+ tmp = (tmp & ~BIT(23)) | BIT(22); // SUB opcode
+ } else {
+ tmp = (tmp & ~BIT(22)) | BIT(23); // ADD opcode
+ }
+
+ shift = get_group_rem(group, &offset);
+ if (shift < 24) {
+ offset >>= 24 - shift;
+ offset |= (shift + 8) << 7;
+ }
+ *(u32 *)loc = __opcode_to_mem_arm((tmp & ~0xfff) | offset);
+ break;
+
+ case R_ARM_LDR_PC_G2:
+ tmp = __mem_to_opcode_arm(*(u32 *)loc);
+ offset = tmp & 0xfff;
+ if (~tmp & BIT(23)) // U bit cleared?
+ offset = -offset;
+ offset += sym->st_value - loc;
+ if (offset < 0) {
+ offset = -offset;
+ tmp &= ~BIT(23); // clear U bit
+ } else {
+ tmp |= BIT(23); // set U bit
+ }
+ get_group_rem(2, &offset);
+
+ if (offset > 0xfff) {
+ pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
+ module->name, relindex, i, symname,
+ ELF32_R_TYPE(rel->r_info), loc,
+ sym->st_value);
+ return -ENOEXEC;
+ }
+ *(u32 *)loc = __opcode_to_mem_arm((tmp & ~0xfff) | offset);
+ break;
+#endif
#ifdef CONFIG_THUMB2_KERNEL
case R_ARM_THM_CALL:
case R_ARM_THM_JUMP24:
@@ -234,8 +309,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
((~(j2 ^ sign) & 1) << 22) |
((upper & 0x03ff) << 12) |
((lower & 0x07ff) << 1);
- if (offset & 0x01000000)
- offset -= 0x02000000;
+ offset = sign_extend32(offset, 24);
offset += sym->st_value - loc;
/*
@@ -273,6 +347,8 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
case R_ARM_THM_MOVW_ABS_NC:
case R_ARM_THM_MOVT_ABS:
+ case R_ARM_THM_MOVW_PREL_NC:
+ case R_ARM_THM_MOVT_PREL:
upper = __mem_to_opcode_thumb16(*(u16 *)loc);
lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));
@@ -289,10 +365,14 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
offset = ((upper & 0x000f) << 12) |
((upper & 0x0400) << 1) |
((lower & 0x7000) >> 4) | (lower & 0x00ff);
- offset = (offset ^ 0x8000) - 0x8000;
+ offset = sign_extend32(offset, 15);
offset += sym->st_value;
- if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS)
+ if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_PREL ||
+ ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVW_PREL_NC)
+ offset -= loc;
+ if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS ||
+ ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_PREL)
offset >>= 16;
upper = (u16)((upper & 0xfbf0) |
@@ -315,11 +395,6 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
return 0;
}
-struct mod_unwind_map {
- const Elf_Shdr *unw_sec;
- const Elf_Shdr *txt_sec;
-};
-
static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr,
const Elf_Shdr *sechdrs, const char *name)
{
@@ -343,46 +418,40 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
#ifdef CONFIG_ARM_UNWIND
const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum;
- struct mod_unwind_map maps[ARM_SEC_MAX];
- int i;
+ struct list_head *unwind_list = &mod->arch.unwind_list;
- memset(maps, 0, sizeof(maps));
+ INIT_LIST_HEAD(unwind_list);
+ mod->arch.init_table = NULL;
for (s = sechdrs; s < sechdrs_end; s++) {
const char *secname = secstrs + s->sh_name;
+ const char *txtname;
+ const Elf_Shdr *txt_sec;
- if (!(s->sh_flags & SHF_ALLOC))
+ if (!(s->sh_flags & SHF_ALLOC) ||
+ s->sh_type != ELF_SECTION_UNWIND)
continue;
- if (strcmp(".ARM.exidx.init.text", secname) == 0)
- maps[ARM_SEC_INIT].unw_sec = s;
- else if (strcmp(".ARM.exidx", secname) == 0)
- maps[ARM_SEC_CORE].unw_sec = s;
- else if (strcmp(".ARM.exidx.exit.text", secname) == 0)
- maps[ARM_SEC_EXIT].unw_sec = s;
- else if (strcmp(".ARM.exidx.text.unlikely", secname) == 0)
- maps[ARM_SEC_UNLIKELY].unw_sec = s;
- else if (strcmp(".ARM.exidx.text.hot", secname) == 0)
- maps[ARM_SEC_HOT].unw_sec = s;
- else if (strcmp(".init.text", secname) == 0)
- maps[ARM_SEC_INIT].txt_sec = s;
- else if (strcmp(".text", secname) == 0)
- maps[ARM_SEC_CORE].txt_sec = s;
- else if (strcmp(".exit.text", secname) == 0)
- maps[ARM_SEC_EXIT].txt_sec = s;
- else if (strcmp(".text.unlikely", secname) == 0)
- maps[ARM_SEC_UNLIKELY].txt_sec = s;
- else if (strcmp(".text.hot", secname) == 0)
- maps[ARM_SEC_HOT].txt_sec = s;
- }
+ if (!strcmp(".ARM.exidx", secname))
+ txtname = ".text";
+ else
+ txtname = secname + strlen(".ARM.exidx");
+ txt_sec = find_mod_section(hdr, sechdrs, txtname);
- for (i = 0; i < ARM_SEC_MAX; i++)
- if (maps[i].unw_sec && maps[i].txt_sec)
- mod->arch.unwind[i] =
- unwind_table_add(maps[i].unw_sec->sh_addr,
- maps[i].unw_sec->sh_size,
- maps[i].txt_sec->sh_addr,
- maps[i].txt_sec->sh_size);
+ if (txt_sec) {
+ struct unwind_table *table =
+ unwind_table_add(s->sh_addr,
+ s->sh_size,
+ txt_sec->sh_addr,
+ txt_sec->sh_size);
+
+ list_add(&table->mod_list, unwind_list);
+
+ /* save init table for module_arch_freeing_init */
+ if (strcmp(".ARM.exidx.init.text", secname) == 0)
+ mod->arch.init_table = table;
+ }
+ }
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
s = find_mod_section(hdr, sechdrs, ".pv_table");
@@ -403,10 +472,27 @@ void
module_arch_cleanup(struct module *mod)
{
#ifdef CONFIG_ARM_UNWIND
- int i;
+ struct unwind_table *tmp;
+ struct unwind_table *n;
+
+ list_for_each_entry_safe(tmp, n,
+ &mod->arch.unwind_list, mod_list) {
+ list_del(&tmp->mod_list);
+ unwind_table_del(tmp);
+ }
+ mod->arch.init_table = NULL;
+#endif
+}
+
+void module_arch_freeing_init(struct module *mod)
+{
+#ifdef CONFIG_ARM_UNWIND
+ struct unwind_table *init = mod->arch.init_table;
- for (i = 0; i < ARM_SEC_MAX; i++)
- if (mod->arch.unwind[i])
- unwind_table_del(mod->arch.unwind[i]);
+ if (init) {
+ mod->arch.init_table = NULL;
+ list_del(&init->mod_list);
+ unwind_table_del(init);
+ }
#endif
}
diff --git a/arch/arm/kernel/module.lds b/arch/arm/kernel/module.lds
deleted file mode 100644
index eacb5c67f61e..000000000000
--- a/arch/arm/kernel/module.lds
+++ /dev/null
@@ -1,4 +0,0 @@
-SECTIONS {
- .plt : { BYTE(0) }
- .init.plt : { BYTE(0) }
-}
diff --git a/arch/arm/kernel/opcodes.c b/arch/arm/kernel/opcodes.c
index f8179c6a817f..651914947443 100644
--- a/arch/arm/kernel/opcodes.c
+++ b/arch/arm/kernel/opcodes.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/kernel/opcodes.c
*
* A32 condition code lookup feature moved from nwfpe/fpopcode.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
diff --git a/arch/arm/kernel/paravirt.c b/arch/arm/kernel/paravirt.c
index 53f371ed4568..7dd9806369fb 100644
--- a/arch/arm/kernel/paravirt.c
+++ b/arch/arm/kernel/paravirt.c
@@ -1,12 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*
* Copyright (C) 2013 Citrix Systems
*
@@ -16,10 +9,15 @@
#include <linux/export.h>
#include <linux/jump_label.h>
#include <linux/types.h>
+#include <linux/static_call.h>
#include <asm/paravirt.h>
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
-struct pv_time_ops pv_time_ops;
-EXPORT_SYMBOL_GPL(pv_time_ops);
+static u64 native_steal_clock(int cpu)
+{
+ return 0;
+}
+
+DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
index a1a34722c655..4d45e60cd46d 100644
--- a/arch/arm/kernel/patch.c
+++ b/arch/arm/kernel/patch.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
@@ -8,17 +9,17 @@
#include <asm/fixmap.h>
#include <asm/smp_plat.h>
#include <asm/opcodes.h>
-#include <asm/patch.h>
+#include <asm/text-patching.h>
struct patch {
void *addr;
unsigned int insn;
};
-static DEFINE_SPINLOCK(patch_lock);
+#ifdef CONFIG_MMU
+static DEFINE_RAW_SPINLOCK(patch_lock);
static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
- __acquires(&patch_lock)
{
unsigned int uintaddr = (uintptr_t) addr;
bool module = !core_kernel_text(uintaddr);
@@ -32,9 +33,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
return addr;
if (flags)
- spin_lock_irqsave(&patch_lock, *flags);
- else
- __acquire(&patch_lock);
+ raw_spin_lock_irqsave(&patch_lock, *flags);
set_fixmap(fixmap, page_to_phys(page));
@@ -42,15 +41,19 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
}
static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
- __releases(&patch_lock)
{
clear_fixmap(fixmap);
if (flags)
- spin_unlock_irqrestore(&patch_lock, *flags);
- else
- __release(&patch_lock);
+ raw_spin_unlock_irqrestore(&patch_lock, *flags);
}
+#else
+static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
+{
+ return addr;
+}
+static void __kprobes patch_unmap(int fixmap, unsigned long *flags) { }
+#endif
void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
{
@@ -63,8 +66,6 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
if (remap)
waddr = patch_map(addr, FIX_TEXT_POKE0, &flags);
- else
- __acquire(&patch_lock);
if (thumb2 && __opcode_is_thumb16(insn)) {
*(u16 *)waddr = __opcode_to_mem_thumb16(insn);
@@ -101,8 +102,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
if (waddr != addr) {
flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
patch_unmap(FIX_TEXT_POKE0, &flags);
- } else
- __release(&patch_lock);
+ }
flush_icache_range((uintptr_t)(addr),
(uintptr_t)(addr) + size);
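Two independent cleanups land here: patch_lock becomes a raw spinlock, presumably because text patching can be reached from atomic contexts where a sleeping PREEMPT_RT spinlock is not permitted, and !CONFIG_MMU builds gain pass-through patch_map()/patch_unmap() stubs, since without an MMU there is no fixmap alias to write through and kernel text is directly writable.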
diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c
index 22bf1f64d99a..a2601b1ef318 100644
--- a/arch/arm/kernel/perf_callchain.c
+++ b/arch/arm/kernel/perf_callchain.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ARM callchain support
*
@@ -36,7 +37,7 @@ user_backtrace(struct frame_tail __user *tail,
struct frame_tail buftail;
unsigned long err;
- if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+ if (!access_ok(tail, sizeof(buftail)))
return NULL;
pagefault_disable();
@@ -63,11 +64,6 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
{
struct frame_tail __user *tail;
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
- /* We don't support guest os callchain now */
- return;
- }
-
perf_callchain_store(entry, regs->ARM_pc);
if (!current->mm)
@@ -85,13 +81,11 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
 * whilst unwinding the stackframe and is like a subroutine return so we use
* the PC.
*/
-static int
-callchain_trace(struct stackframe *fr,
- void *data)
+static bool
+callchain_trace(void *data, unsigned long pc)
{
struct perf_callchain_entry_ctx *entry = data;
- perf_callchain_store(entry, fr->pc);
- return 0;
+ return perf_callchain_store(entry, pc) == 0;
}
void
@@ -99,38 +93,6 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
{
struct stackframe fr;
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
- /* We don't support guest os callchain now */
- return;
- }
-
arm_get_current_stackframe(regs, &fr);
walk_stackframe(&fr, callchain_trace, entry);
}
-
-unsigned long perf_instruction_pointer(struct pt_regs *regs)
-{
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
- return perf_guest_cbs->get_guest_ip();
-
- return instruction_pointer(regs);
-}
-
-unsigned long perf_misc_flags(struct pt_regs *regs)
-{
- int misc = 0;
-
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
- if (perf_guest_cbs->is_user_mode())
- misc |= PERF_RECORD_MISC_GUEST_USER;
- else
- misc |= PERF_RECORD_MISC_GUEST_KERNEL;
- } else {
- if (user_mode(regs))
- misc |= PERF_RECORD_MISC_USER;
- else
- misc |= PERF_RECORD_MISC_KERNEL;
- }
-
- return misc;
-}
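The perf_guest_cbs checks and the perf_instruction_pointer()/perf_misc_flags() overrides are dropped on the assumption that the generic perf core now supplies this guest-mode handling, leaving the arch to provide only the raw unwinders. Note also that callchain_trace() now returns a bool to walk_stackframe(): it keeps walking only while perf_callchain_store() succeeds, so the unwind stops as soon as the callchain buffer is full instead of walking the whole stack.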
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
deleted file mode 100644
index 8226d0b71fd3..000000000000
--- a/arch/arm/kernel/perf_event_v6.c
+++ /dev/null
@@ -1,585 +0,0 @@
-/*
- * ARMv6 Performance counter handling code.
- *
- * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
- *
- * ARMv6 has 2 configurable performance counters and a single cycle counter.
- * They all share a single reset bit but can be written to zero so we can use
- * that for a reset.
- *
- * The counters can't be individually enabled or disabled so when we remove
- * one event and replace it with another we could get spurious counts from the
- * wrong event. However, we can take advantage of the fact that the
- * performance counters can export events to the event bus, and the event bus
- * itself can be monitored. This requires that we *don't* export the events to
- * the event bus. The procedure for disabling a configurable counter is:
- * - change the counter to count the ETMEXTOUT[0] signal (0x20). This
- * effectively stops the counter from counting.
- * - disable the counter's interrupt generation (each counter has it's
- * own interrupt enable bit).
- * Once stopped, the counter value can be written as 0 to reset.
- *
- * To enable a counter:
- * - enable the counter's interrupt generation.
- * - set the new event type.
- *
- * Note: the dedicated cycle counter only counts cycles and can't be
- * enabled/disabled independently of the others. When we want to disable the
- * cycle counter, we have to just disable the interrupt reporting and start
- * ignoring that counter. When re-enabling, we have to reset the value and
- * enable the interrupt.
- */
-
-#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
-
-#include <asm/cputype.h>
-#include <asm/irq_regs.h>
-
-#include <linux/of.h>
-#include <linux/perf/arm_pmu.h>
-#include <linux/platform_device.h>
-
-enum armv6_perf_types {
- ARMV6_PERFCTR_ICACHE_MISS = 0x0,
- ARMV6_PERFCTR_IBUF_STALL = 0x1,
- ARMV6_PERFCTR_DDEP_STALL = 0x2,
- ARMV6_PERFCTR_ITLB_MISS = 0x3,
- ARMV6_PERFCTR_DTLB_MISS = 0x4,
- ARMV6_PERFCTR_BR_EXEC = 0x5,
- ARMV6_PERFCTR_BR_MISPREDICT = 0x6,
- ARMV6_PERFCTR_INSTR_EXEC = 0x7,
- ARMV6_PERFCTR_DCACHE_HIT = 0x9,
- ARMV6_PERFCTR_DCACHE_ACCESS = 0xA,
- ARMV6_PERFCTR_DCACHE_MISS = 0xB,
- ARMV6_PERFCTR_DCACHE_WBACK = 0xC,
- ARMV6_PERFCTR_SW_PC_CHANGE = 0xD,
- ARMV6_PERFCTR_MAIN_TLB_MISS = 0xF,
- ARMV6_PERFCTR_EXPL_D_ACCESS = 0x10,
- ARMV6_PERFCTR_LSU_FULL_STALL = 0x11,
- ARMV6_PERFCTR_WBUF_DRAINED = 0x12,
- ARMV6_PERFCTR_CPU_CYCLES = 0xFF,
- ARMV6_PERFCTR_NOP = 0x20,
-};
-
-enum armv6_counters {
- ARMV6_CYCLE_COUNTER = 0,
- ARMV6_COUNTER0,
- ARMV6_COUNTER1,
-};
-
-/*
- * The hardware events that we support. We do support cache operations but
- * we have Harvard caches and no way to combine instruction and data
- * accesses/misses in hardware.
- */
-static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
- PERF_MAP_ALL_UNSUPPORTED,
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT,
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6_PERFCTR_IBUF_STALL,
- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV6_PERFCTR_LSU_FULL_STALL,
-};
-
-static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- PERF_CACHE_MAP_ALL_UNSUPPORTED,
-
- /*
- * The performance counters don't differentiate between read and write
- * accesses/misses so this isn't strictly correct, but it's the best we
- * can do. Writes and reads get combined.
- */
- [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
- [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
- [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
- [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
-
- [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
-
- /*
- * The ARM performance counters can count micro DTLB misses, micro ITLB
- * misses and main TLB misses. There isn't an event for TLB misses, so
- * use the micro misses here, and if users want the main TLB misses they
- * can use a raw counter.
- */
- [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
- [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
-
- [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
- [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
-};
-
-enum armv6mpcore_perf_types {
- ARMV6MPCORE_PERFCTR_ICACHE_MISS = 0x0,
- ARMV6MPCORE_PERFCTR_IBUF_STALL = 0x1,
- ARMV6MPCORE_PERFCTR_DDEP_STALL = 0x2,
- ARMV6MPCORE_PERFCTR_ITLB_MISS = 0x3,
- ARMV6MPCORE_PERFCTR_DTLB_MISS = 0x4,
- ARMV6MPCORE_PERFCTR_BR_EXEC = 0x5,
- ARMV6MPCORE_PERFCTR_BR_NOTPREDICT = 0x6,
- ARMV6MPCORE_PERFCTR_BR_MISPREDICT = 0x7,
- ARMV6MPCORE_PERFCTR_INSTR_EXEC = 0x8,
- ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA,
- ARMV6MPCORE_PERFCTR_DCACHE_RDMISS = 0xB,
- ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC,
- ARMV6MPCORE_PERFCTR_DCACHE_WRMISS = 0xD,
- ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE,
- ARMV6MPCORE_PERFCTR_SW_PC_CHANGE = 0xF,
- ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS = 0x10,
- ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11,
- ARMV6MPCORE_PERFCTR_LSU_FULL_STALL = 0x12,
- ARMV6MPCORE_PERFCTR_WBUF_DRAINED = 0x13,
- ARMV6MPCORE_PERFCTR_CPU_CYCLES = 0xFF,
-};
-
-/*
- * The hardware events that we support. We do support cache operations but
- * we have Harvard caches and no way to combine instruction and data
- * accesses/misses in hardware.
- */
-static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
- PERF_MAP_ALL_UNSUPPORTED,
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6MPCORE_PERFCTR_IBUF_STALL,
- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV6MPCORE_PERFCTR_LSU_FULL_STALL,
-};
-
-static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- PERF_CACHE_MAP_ALL_UNSUPPORTED,
-
- [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
- [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
- [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
- [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,
-
- [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
-
- /*
- * The ARM performance counters can count micro DTLB misses, micro ITLB
- * misses and main TLB misses. There isn't an event for TLB misses, so
- * use the micro misses here, and if users want the main TLB misses they
- * can use a raw counter.
- */
- [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
- [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
-
- [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
- [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
-};
-
-static inline unsigned long
-armv6_pmcr_read(void)
-{
- u32 val;
- asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val));
- return val;
-}
-
-static inline void
-armv6_pmcr_write(unsigned long val)
-{
- asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val));
-}
-
-#define ARMV6_PMCR_ENABLE (1 << 0)
-#define ARMV6_PMCR_CTR01_RESET (1 << 1)
-#define ARMV6_PMCR_CCOUNT_RESET (1 << 2)
-#define ARMV6_PMCR_CCOUNT_DIV (1 << 3)
-#define ARMV6_PMCR_COUNT0_IEN (1 << 4)
-#define ARMV6_PMCR_COUNT1_IEN (1 << 5)
-#define ARMV6_PMCR_CCOUNT_IEN (1 << 6)
-#define ARMV6_PMCR_COUNT0_OVERFLOW (1 << 8)
-#define ARMV6_PMCR_COUNT1_OVERFLOW (1 << 9)
-#define ARMV6_PMCR_CCOUNT_OVERFLOW (1 << 10)
-#define ARMV6_PMCR_EVT_COUNT0_SHIFT 20
-#define ARMV6_PMCR_EVT_COUNT0_MASK (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
-#define ARMV6_PMCR_EVT_COUNT1_SHIFT 12
-#define ARMV6_PMCR_EVT_COUNT1_MASK (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)
-
-#define ARMV6_PMCR_OVERFLOWED_MASK \
- (ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
- ARMV6_PMCR_CCOUNT_OVERFLOW)
-
-static inline int
-armv6_pmcr_has_overflowed(unsigned long pmcr)
-{
- return pmcr & ARMV6_PMCR_OVERFLOWED_MASK;
-}
-
-static inline int
-armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
- enum armv6_counters counter)
-{
- int ret = 0;
-
- if (ARMV6_CYCLE_COUNTER == counter)
- ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
- else if (ARMV6_COUNTER0 == counter)
- ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
- else if (ARMV6_COUNTER1 == counter)
- ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
- else
- WARN_ONCE(1, "invalid counter number (%d)\n", counter);
-
- return ret;
-}
-
-static inline u32 armv6pmu_read_counter(struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- int counter = hwc->idx;
- unsigned long value = 0;
-
- if (ARMV6_CYCLE_COUNTER == counter)
- asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value));
- else if (ARMV6_COUNTER0 == counter)
- asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value));
- else if (ARMV6_COUNTER1 == counter)
- asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value));
- else
- WARN_ONCE(1, "invalid counter number (%d)\n", counter);
-
- return value;
-}
-
-static inline void armv6pmu_write_counter(struct perf_event *event, u32 value)
-{
- struct hw_perf_event *hwc = &event->hw;
- int counter = hwc->idx;
-
- if (ARMV6_CYCLE_COUNTER == counter)
- asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value));
- else if (ARMV6_COUNTER0 == counter)
- asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value));
- else if (ARMV6_COUNTER1 == counter)
- asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r"(value));
- else
- WARN_ONCE(1, "invalid counter number (%d)\n", counter);
-}
-
-static void armv6pmu_enable_event(struct perf_event *event)
-{
- unsigned long val, mask, evt, flags;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
- int idx = hwc->idx;
-
- if (ARMV6_CYCLE_COUNTER == idx) {
- mask = 0;
- evt = ARMV6_PMCR_CCOUNT_IEN;
- } else if (ARMV6_COUNTER0 == idx) {
- mask = ARMV6_PMCR_EVT_COUNT0_MASK;
- evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
- ARMV6_PMCR_COUNT0_IEN;
- } else if (ARMV6_COUNTER1 == idx) {
- mask = ARMV6_PMCR_EVT_COUNT1_MASK;
- evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
- ARMV6_PMCR_COUNT1_IEN;
- } else {
- WARN_ONCE(1, "invalid counter number (%d)\n", idx);
- return;
- }
-
- /*
- * Mask out the current event and set the counter to count the event
- * that we're interested in.
- */
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- val = armv6_pmcr_read();
- val &= ~mask;
- val |= evt;
- armv6_pmcr_write(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
-static irqreturn_t
-armv6pmu_handle_irq(int irq_num,
- void *dev)
-{
- unsigned long pmcr = armv6_pmcr_read();
- struct perf_sample_data data;
- struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
- struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
- struct pt_regs *regs;
- int idx;
-
- if (!armv6_pmcr_has_overflowed(pmcr))
- return IRQ_NONE;
-
- regs = get_irq_regs();
-
- /*
- * The interrupts are cleared by writing the overflow flags back to
- * the control register. All of the other bits don't have any effect
- * if they are rewritten, so write the whole value back.
- */
- armv6_pmcr_write(pmcr);
-
- for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
- struct perf_event *event = cpuc->events[idx];
- struct hw_perf_event *hwc;
-
- /* Ignore if we don't have an event. */
- if (!event)
- continue;
-
- /*
- * We have a single interrupt for all counters. Check that
- * each counter has overflowed before we process it.
- */
- if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
- continue;
-
- hwc = &event->hw;
- armpmu_event_update(event);
- perf_sample_data_init(&data, 0, hwc->last_period);
- if (!armpmu_event_set_period(event))
- continue;
-
- if (perf_event_overflow(event, &data, regs))
- cpu_pmu->disable(event);
- }
-
- /*
- * Handle the pending perf events.
- *
- * Note: this call *must* be run with interrupts disabled. For
- * platforms that can have the PMU interrupts raised as an NMI, this
- * will not work.
- */
- irq_work_run();
-
- return IRQ_HANDLED;
-}
-
-static void armv6pmu_start(struct arm_pmu *cpu_pmu)
-{
- unsigned long flags, val;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- val = armv6_pmcr_read();
- val |= ARMV6_PMCR_ENABLE;
- armv6_pmcr_write(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
-static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
-{
- unsigned long flags, val;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- val = armv6_pmcr_read();
- val &= ~ARMV6_PMCR_ENABLE;
- armv6_pmcr_write(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
-static int
-armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
- struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- /* Always place a cycle counter into the cycle counter. */
- if (ARMV6_PERFCTR_CPU_CYCLES == hwc->config_base) {
- if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
- return -EAGAIN;
-
- return ARMV6_CYCLE_COUNTER;
- } else {
- /*
- * For anything other than a cycle counter, try and use
- * counter0 and counter1.
- */
- if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask))
- return ARMV6_COUNTER1;
-
- if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask))
- return ARMV6_COUNTER0;
-
- /* The counters are all in use. */
- return -EAGAIN;
- }
-}
-
-static void armv6pmu_disable_event(struct perf_event *event)
-{
- unsigned long val, mask, evt, flags;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
- int idx = hwc->idx;
-
- if (ARMV6_CYCLE_COUNTER == idx) {
- mask = ARMV6_PMCR_CCOUNT_IEN;
- evt = 0;
- } else if (ARMV6_COUNTER0 == idx) {
- mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
- evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
- } else if (ARMV6_COUNTER1 == idx) {
- mask = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
- evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
- } else {
- WARN_ONCE(1, "invalid counter number (%d)\n", idx);
- return;
- }
-
- /*
- * Mask out the current event and set the counter to count the number
- * of ETM bus signal assertion cycles. The external reporting should
- * be disabled and so this should never increment.
- */
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- val = armv6_pmcr_read();
- val &= ~mask;
- val |= evt;
- armv6_pmcr_write(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
-static void armv6mpcore_pmu_disable_event(struct perf_event *event)
-{
- unsigned long val, mask, flags, evt = 0;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
- int idx = hwc->idx;
-
- if (ARMV6_CYCLE_COUNTER == idx) {
- mask = ARMV6_PMCR_CCOUNT_IEN;
- } else if (ARMV6_COUNTER0 == idx) {
- mask = ARMV6_PMCR_COUNT0_IEN;
- } else if (ARMV6_COUNTER1 == idx) {
- mask = ARMV6_PMCR_COUNT1_IEN;
- } else {
- WARN_ONCE(1, "invalid counter number (%d)\n", idx);
- return;
- }
-
- /*
- * Unlike UP ARMv6, we don't have a way of stopping the counters. We
- * simply disable the interrupt reporting.
- */
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- val = armv6_pmcr_read();
- val &= ~mask;
- val |= evt;
- armv6_pmcr_write(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
-static int armv6_map_event(struct perf_event *event)
-{
- return armpmu_map_event(event, &armv6_perf_map,
- &armv6_perf_cache_map, 0xFF);
-}
-
-static void armv6pmu_init(struct arm_pmu *cpu_pmu)
-{
- cpu_pmu->handle_irq = armv6pmu_handle_irq;
- cpu_pmu->enable = armv6pmu_enable_event;
- cpu_pmu->disable = armv6pmu_disable_event;
- cpu_pmu->read_counter = armv6pmu_read_counter;
- cpu_pmu->write_counter = armv6pmu_write_counter;
- cpu_pmu->get_event_idx = armv6pmu_get_event_idx;
- cpu_pmu->start = armv6pmu_start;
- cpu_pmu->stop = armv6pmu_stop;
- cpu_pmu->map_event = armv6_map_event;
- cpu_pmu->num_events = 3;
- cpu_pmu->max_period = (1LLU << 32) - 1;
-}
-
-static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
-{
- armv6pmu_init(cpu_pmu);
- cpu_pmu->name = "armv6_1136";
- return 0;
-}
-
-static int armv6_1156_pmu_init(struct arm_pmu *cpu_pmu)
-{
- armv6pmu_init(cpu_pmu);
- cpu_pmu->name = "armv6_1156";
- return 0;
-}
-
-static int armv6_1176_pmu_init(struct arm_pmu *cpu_pmu)
-{
- armv6pmu_init(cpu_pmu);
- cpu_pmu->name = "armv6_1176";
- return 0;
-}
-
-/*
- * ARMv6mpcore is almost identical to single core ARMv6 with the exception
- * that some of the events have different enumerations and that there is no
- * *hack* to stop the programmable counters. To stop the counters we simply
- * disable the interrupt reporting and update the event. When unthrottling we
- * reset the period and enable the interrupt reporting.
- */
-
-static int armv6mpcore_map_event(struct perf_event *event)
-{
- return armpmu_map_event(event, &armv6mpcore_perf_map,
- &armv6mpcore_perf_cache_map, 0xFF);
-}
-
-static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
-{
- cpu_pmu->name = "armv6_11mpcore";
- cpu_pmu->handle_irq = armv6pmu_handle_irq;
- cpu_pmu->enable = armv6pmu_enable_event;
- cpu_pmu->disable = armv6mpcore_pmu_disable_event;
- cpu_pmu->read_counter = armv6pmu_read_counter;
- cpu_pmu->write_counter = armv6pmu_write_counter;
- cpu_pmu->get_event_idx = armv6pmu_get_event_idx;
- cpu_pmu->start = armv6pmu_start;
- cpu_pmu->stop = armv6pmu_stop;
- cpu_pmu->map_event = armv6mpcore_map_event;
- cpu_pmu->num_events = 3;
- cpu_pmu->max_period = (1LLU << 32) - 1;
-
- return 0;
-}
-
-static const struct of_device_id armv6_pmu_of_device_ids[] = {
- {.compatible = "arm,arm11mpcore-pmu", .data = armv6mpcore_pmu_init},
- {.compatible = "arm,arm1176-pmu", .data = armv6_1176_pmu_init},
- {.compatible = "arm,arm1136-pmu", .data = armv6_1136_pmu_init},
- { /* sentinel value */ }
-};
-
-static const struct pmu_probe_info armv6_pmu_probe_table[] = {
- ARM_PMU_PROBE(ARM_CPU_PART_ARM1136, armv6_1136_pmu_init),
- ARM_PMU_PROBE(ARM_CPU_PART_ARM1156, armv6_1156_pmu_init),
- ARM_PMU_PROBE(ARM_CPU_PART_ARM1176, armv6_1176_pmu_init),
- ARM_PMU_PROBE(ARM_CPU_PART_ARM11MPCORE, armv6mpcore_pmu_init),
- { /* sentinel value */ }
-};
-
-static int armv6_pmu_device_probe(struct platform_device *pdev)
-{
- return arm_pmu_device_probe(pdev, armv6_pmu_of_device_ids,
- armv6_pmu_probe_table);
-}
-
-static struct platform_driver armv6_pmu_driver = {
- .driver = {
- .name = "armv6-pmu",
- .of_match_table = armv6_pmu_of_device_ids,
- },
- .probe = armv6_pmu_device_probe,
-};
-
-builtin_platform_driver(armv6_pmu_driver);
-#endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
deleted file mode 100644
index ab6522b43659..000000000000
--- a/arch/arm/kernel/perf_event_v7.c
+++ /dev/null
@@ -1,2038 +0,0 @@
-/*
- * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
- *
- * ARMv7 support: Jean Pihet <jpihet@mvista.com>
- * 2010 (c) MontaVista Software, LLC.
- *
- * Copied from ARMv6 code, with the low level code inspired
- * by the ARMv7 Oprofile code.
- *
- * Cortex-A8 has up to 4 configurable performance counters and
- * a single cycle counter.
- * Cortex-A9 has up to 31 configurable performance counters and
- * a single cycle counter.
- *
- * All counters can be enabled/disabled and IRQ masked separately. The cycle
- * counter and all 4 performance counters together can be reset separately.
- */
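
Because every ARMv7 counter really is controlled separately, programming one
boils down to four banked-register accesses; a condensed sketch of what
armv7pmu_enable_event() below does (hypothetical helper name; locking and
the cycle-counter special case omitted):

static void armv7_program_counter_sketch(int idx, u32 event)
{
	armv7_pmnc_disable_counter(idx);	/* PMCNTENCLR: stop it first */
	armv7_pmnc_write_evtsel(idx, event);	/* PMSELR + PMXEVTYPER: pick event */
	armv7_pmnc_enable_intens(idx);		/* PMINTENSET: unmask overflow IRQ */
	armv7_pmnc_enable_counter(idx);		/* PMCNTENSET: start counting */
}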
-
-#ifdef CONFIG_CPU_V7
-
-#include <asm/cp15.h>
-#include <asm/cputype.h>
-#include <asm/irq_regs.h>
-#include <asm/vfp.h>
-#include "../vfp/vfpinstr.h"
-
-#include <linux/of.h>
-#include <linux/perf/arm_pmu.h>
-#include <linux/platform_device.h>
-
-/*
- * Common ARMv7 event types
- *
- * Note: An implementation may not be able to count all of these events
- * but the encodings are considered to be `reserved' in the case that
- * they are not available.
- */
-#define ARMV7_PERFCTR_PMNC_SW_INCR 0x00
-#define ARMV7_PERFCTR_L1_ICACHE_REFILL 0x01
-#define ARMV7_PERFCTR_ITLB_REFILL 0x02
-#define ARMV7_PERFCTR_L1_DCACHE_REFILL 0x03
-#define ARMV7_PERFCTR_L1_DCACHE_ACCESS 0x04
-#define ARMV7_PERFCTR_DTLB_REFILL 0x05
-#define ARMV7_PERFCTR_MEM_READ 0x06
-#define ARMV7_PERFCTR_MEM_WRITE 0x07
-#define ARMV7_PERFCTR_INSTR_EXECUTED 0x08
-#define ARMV7_PERFCTR_EXC_TAKEN 0x09
-#define ARMV7_PERFCTR_EXC_EXECUTED 0x0A
-#define ARMV7_PERFCTR_CID_WRITE 0x0B
-
-/*
- * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
- * It counts:
- * - all (taken) branch instructions,
- * - instructions that explicitly write the PC,
- * - exception generating instructions.
- */
-#define ARMV7_PERFCTR_PC_WRITE 0x0C
-#define ARMV7_PERFCTR_PC_IMM_BRANCH 0x0D
-#define ARMV7_PERFCTR_PC_PROC_RETURN 0x0E
-#define ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS 0x0F
-#define ARMV7_PERFCTR_PC_BRANCH_MIS_PRED 0x10
-#define ARMV7_PERFCTR_CLOCK_CYCLES 0x11
-#define ARMV7_PERFCTR_PC_BRANCH_PRED 0x12
-
-/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
-#define ARMV7_PERFCTR_MEM_ACCESS 0x13
-#define ARMV7_PERFCTR_L1_ICACHE_ACCESS 0x14
-#define ARMV7_PERFCTR_L1_DCACHE_WB 0x15
-#define ARMV7_PERFCTR_L2_CACHE_ACCESS 0x16
-#define ARMV7_PERFCTR_L2_CACHE_REFILL 0x17
-#define ARMV7_PERFCTR_L2_CACHE_WB 0x18
-#define ARMV7_PERFCTR_BUS_ACCESS 0x19
-#define ARMV7_PERFCTR_MEM_ERROR 0x1A
-#define ARMV7_PERFCTR_INSTR_SPEC 0x1B
-#define ARMV7_PERFCTR_TTBR_WRITE 0x1C
-#define ARMV7_PERFCTR_BUS_CYCLES 0x1D
-
-#define ARMV7_PERFCTR_CPU_CYCLES 0xFF
-
-/* ARMv7 Cortex-A8 specific event types */
-#define ARMV7_A8_PERFCTR_L2_CACHE_ACCESS 0x43
-#define ARMV7_A8_PERFCTR_L2_CACHE_REFILL 0x44
-#define ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS 0x50
-#define ARMV7_A8_PERFCTR_STALL_ISIDE 0x56
-
-/* ARMv7 Cortex-A9 specific event types */
-#define ARMV7_A9_PERFCTR_INSTR_CORE_RENAME 0x68
-#define ARMV7_A9_PERFCTR_STALL_ICACHE 0x60
-#define ARMV7_A9_PERFCTR_STALL_DISPATCH 0x66
-
-/* ARMv7 Cortex-A5 specific event types */
-#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL 0xc2
-#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP 0xc3
-
-/* ARMv7 Cortex-A15 specific event types */
-#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ 0x40
-#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE 0x41
-#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ 0x42
-#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE 0x43
-
-#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ 0x4C
-#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE 0x4D
-
-#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ 0x50
-#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE 0x51
-#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ 0x52
-#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE 0x53
-
-#define ARMV7_A15_PERFCTR_PC_WRITE_SPEC 0x76
-
-/* ARMv7 Cortex-A12 specific event types */
-#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ 0x40
-#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE 0x41
-
-#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ 0x50
-#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE 0x51
-
-#define ARMV7_A12_PERFCTR_PC_WRITE_SPEC 0x76
-
-#define ARMV7_A12_PERFCTR_PF_TLB_REFILL 0xe7
-
-/* ARMv7 Krait specific event types */
-#define KRAIT_PMRESR0_GROUP0 0xcc
-#define KRAIT_PMRESR1_GROUP0 0xd0
-#define KRAIT_PMRESR2_GROUP0 0xd4
-#define KRAIT_VPMRESR0_GROUP0 0xd8
-
-#define KRAIT_PERFCTR_L1_ICACHE_ACCESS 0x10011
-#define KRAIT_PERFCTR_L1_ICACHE_MISS 0x10010
-
-#define KRAIT_PERFCTR_L1_ITLB_ACCESS 0x12222
-#define KRAIT_PERFCTR_L1_DTLB_ACCESS 0x12210
-
-/* ARMv7 Scorpion specific event types */
-#define SCORPION_LPM0_GROUP0 0x4c
-#define SCORPION_LPM1_GROUP0 0x50
-#define SCORPION_LPM2_GROUP0 0x54
-#define SCORPION_L2LPM_GROUP0 0x58
-#define SCORPION_VLPM_GROUP0 0x5c
-
-#define SCORPION_ICACHE_ACCESS 0x10053
-#define SCORPION_ICACHE_MISS 0x10052
-
-#define SCORPION_DTLB_ACCESS 0x12013
-#define SCORPION_DTLB_MISS 0x12012
-
-#define SCORPION_ITLB_MISS 0x12021
-
-/*
- * Cortex-A8 HW events mapping
- *
- * The hardware events that we support. We do support cache operations but
- * we have Harvard caches and no way to combine instruction and data
- * accesses/misses in hardware.
- */
-static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
- PERF_MAP_ALL_UNSUPPORTED,
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE,
-};
-
-static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- PERF_CACHE_MAP_ALL_UNSUPPORTED,
-
- /*
- * The performance counters don't differentiate between read and write
- * accesses/misses so this isn't strictly correct, but it's the best we
- * can do. Writes and reads get combined.
- */
- [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
-
- [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
- [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
-
- [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
- [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
- [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
- [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
-
- [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
- [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
-
- [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
- [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
-
- [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-};
-
-/*
- * Cortex-A9 HW events mapping
- */
-static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
- PERF_MAP_ALL_UNSUPPORTED,
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE,
- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV7_A9_PERFCTR_STALL_DISPATCH,
-};
-
-static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- PERF_CACHE_MAP_ALL_UNSUPPORTED,
-
- /*
- * The performance counters don't differentiate between read and write
- * accesses/misses so this isn't strictly correct, but it's the best we
- * can do. Writes and reads get combined.
- */
- [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
-
- [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
-
- [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
- [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
-
- [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
- [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
-
- [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-};
-
-/*
- * Cortex-A5 HW events mapping
- */
-static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
- PERF_MAP_ALL_UNSUPPORTED,
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-};
-
-static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- PERF_CACHE_MAP_ALL_UNSUPPORTED,
-
- [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- [C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
- [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
-
- [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
- [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
- /*
- * The prefetch counters don't differentiate between the I side and the
- * D side.
- */
- [C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
- [C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
-
- [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
- [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
-
- [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
- [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
-
- [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-};
-
-/*
- * Cortex-A15 HW events mapping
- */
-static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
- PERF_MAP_ALL_UNSUPPORTED,
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
-};
-
-static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- PERF_CACHE_MAP_ALL_UNSUPPORTED,
-
- [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
- [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
- [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
- [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
-
- /*
- * Not all performance counters differentiate between read and write
- * accesses/misses so we're not always strictly correct, but it's the
- * best we can do. Writes and reads get combined in these cases.
- */
- [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
- [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
-
- [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
- [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
- [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
- [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
-
- [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
- [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
-
- [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
- [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
-
- [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-};
-
-/*
- * Cortex-A7 HW events mapping
- */
-static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
- PERF_MAP_ALL_UNSUPPORTED,
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
-};
-
-static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- PERF_CACHE_MAP_ALL_UNSUPPORTED,
-
- /*
- * The performance counters don't differentiate between read and write
- * accesses/misses so this isn't strictly correct, but it's the best we
- * can do. Writes and reads get combined.
- */
- [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
-
- [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
- [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
-
- [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
- [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
- [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
- [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
-
- [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
- [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
-
- [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
- [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
-
- [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-};
-
-/*
- * Cortex-A12 HW events mapping
- */
-static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
- PERF_MAP_ALL_UNSUPPORTED,
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
- [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
-};
-
-static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- PERF_CACHE_MAP_ALL_UNSUPPORTED,
-
- [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
- [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
- [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
-
- /*
- * Not all performance counters differentiate between read and write
- * accesses/misses so we're not always strictly correct, but it's the
- * best we can do. Writes and reads get combined in these cases.
- */
- [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
- [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
-
- [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
- [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
- [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
- [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
-
- [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
- [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
- [C(DTLB)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A12_PERFCTR_PF_TLB_REFILL,
-
- [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
- [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
-
- [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-};
-
-/*
- * Krait HW events mapping
- */
-static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
- PERF_MAP_ALL_UNSUPPORTED,
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
-};
-
-static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
- PERF_MAP_ALL_UNSUPPORTED,
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
-};
-
-static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- PERF_CACHE_MAP_ALL_UNSUPPORTED,
-
- /*
- * The performance counters don't differentiate between read and write
- * accesses/misses so this isn't strictly correct, but it's the best we
- * can do. Writes and reads get combined.
- */
- [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
-
- [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ICACHE_ACCESS,
- [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = KRAIT_PERFCTR_L1_ICACHE_MISS,
-
- [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS,
- [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS,
-
- [C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS,
- [C(ITLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS,
-
- [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-};
-
-/*
- * Scorpion HW events mapping
- */
-static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = {
- PERF_MAP_ALL_UNSUPPORTED,
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
- [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
-};
-
-static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- PERF_CACHE_MAP_ALL_UNSUPPORTED,
- /*
- * The performance counters don't differentiate between read and write
- * accesses/misses so this isn't strictly correct, but it's the best we
- * can do. Writes and reads get combined.
- */
- [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
- [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
- [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS,
- [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ICACHE_MISS,
- /*
- * Only ITLB misses and DTLB refills are supported. If users want the
- * DTLB refill misses, a raw counter must be used.
- */
- [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
- [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
- [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
- [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
- [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
- [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
- [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
-};
-
-PMU_FORMAT_ATTR(event, "config:0-7");
-
-static struct attribute *armv7_pmu_format_attrs[] = {
- &format_attr_event.attr,
- NULL,
-};
-
-static struct attribute_group armv7_pmu_format_attr_group = {
- .name = "format",
- .attrs = armv7_pmu_format_attrs,
-};
-
-#define ARMV7_EVENT_ATTR_RESOLVE(m) #m
-#define ARMV7_EVENT_ATTR(name, config) \
- PMU_EVENT_ATTR_STRING(name, armv7_event_attr_##name, \
- "event=" ARMV7_EVENT_ATTR_RESOLVE(config))
-
-ARMV7_EVENT_ATTR(sw_incr, ARMV7_PERFCTR_PMNC_SW_INCR);
-ARMV7_EVENT_ATTR(l1i_cache_refill, ARMV7_PERFCTR_L1_ICACHE_REFILL);
-ARMV7_EVENT_ATTR(l1i_tlb_refill, ARMV7_PERFCTR_ITLB_REFILL);
-ARMV7_EVENT_ATTR(l1d_cache_refill, ARMV7_PERFCTR_L1_DCACHE_REFILL);
-ARMV7_EVENT_ATTR(l1d_cache, ARMV7_PERFCTR_L1_DCACHE_ACCESS);
-ARMV7_EVENT_ATTR(l1d_tlb_refill, ARMV7_PERFCTR_DTLB_REFILL);
-ARMV7_EVENT_ATTR(ld_retired, ARMV7_PERFCTR_MEM_READ);
-ARMV7_EVENT_ATTR(st_retired, ARMV7_PERFCTR_MEM_WRITE);
-ARMV7_EVENT_ATTR(inst_retired, ARMV7_PERFCTR_INSTR_EXECUTED);
-ARMV7_EVENT_ATTR(exc_taken, ARMV7_PERFCTR_EXC_TAKEN);
-ARMV7_EVENT_ATTR(exc_return, ARMV7_PERFCTR_EXC_EXECUTED);
-ARMV7_EVENT_ATTR(cid_write_retired, ARMV7_PERFCTR_CID_WRITE);
-ARMV7_EVENT_ATTR(pc_write_retired, ARMV7_PERFCTR_PC_WRITE);
-ARMV7_EVENT_ATTR(br_immed_retired, ARMV7_PERFCTR_PC_IMM_BRANCH);
-ARMV7_EVENT_ATTR(br_return_retired, ARMV7_PERFCTR_PC_PROC_RETURN);
-ARMV7_EVENT_ATTR(unaligned_ldst_retired, ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS);
-ARMV7_EVENT_ATTR(br_mis_pred, ARMV7_PERFCTR_PC_BRANCH_MIS_PRED);
-ARMV7_EVENT_ATTR(cpu_cycles, ARMV7_PERFCTR_CLOCK_CYCLES);
-ARMV7_EVENT_ATTR(br_pred, ARMV7_PERFCTR_PC_BRANCH_PRED);
-
-static struct attribute *armv7_pmuv1_event_attrs[] = {
- &armv7_event_attr_sw_incr.attr.attr,
- &armv7_event_attr_l1i_cache_refill.attr.attr,
- &armv7_event_attr_l1i_tlb_refill.attr.attr,
- &armv7_event_attr_l1d_cache_refill.attr.attr,
- &armv7_event_attr_l1d_cache.attr.attr,
- &armv7_event_attr_l1d_tlb_refill.attr.attr,
- &armv7_event_attr_ld_retired.attr.attr,
- &armv7_event_attr_st_retired.attr.attr,
- &armv7_event_attr_inst_retired.attr.attr,
- &armv7_event_attr_exc_taken.attr.attr,
- &armv7_event_attr_exc_return.attr.attr,
- &armv7_event_attr_cid_write_retired.attr.attr,
- &armv7_event_attr_pc_write_retired.attr.attr,
- &armv7_event_attr_br_immed_retired.attr.attr,
- &armv7_event_attr_br_return_retired.attr.attr,
- &armv7_event_attr_unaligned_ldst_retired.attr.attr,
- &armv7_event_attr_br_mis_pred.attr.attr,
- &armv7_event_attr_cpu_cycles.attr.attr,
- &armv7_event_attr_br_pred.attr.attr,
- NULL,
-};
-
-static struct attribute_group armv7_pmuv1_events_attr_group = {
- .name = "events",
- .attrs = armv7_pmuv1_event_attrs,
-};
-
-ARMV7_EVENT_ATTR(mem_access, ARMV7_PERFCTR_MEM_ACCESS);
-ARMV7_EVENT_ATTR(l1i_cache, ARMV7_PERFCTR_L1_ICACHE_ACCESS);
-ARMV7_EVENT_ATTR(l1d_cache_wb, ARMV7_PERFCTR_L1_DCACHE_WB);
-ARMV7_EVENT_ATTR(l2d_cache, ARMV7_PERFCTR_L2_CACHE_ACCESS);
-ARMV7_EVENT_ATTR(l2d_cache_refill, ARMV7_PERFCTR_L2_CACHE_REFILL);
-ARMV7_EVENT_ATTR(l2d_cache_wb, ARMV7_PERFCTR_L2_CACHE_WB);
-ARMV7_EVENT_ATTR(bus_access, ARMV7_PERFCTR_BUS_ACCESS);
-ARMV7_EVENT_ATTR(memory_error, ARMV7_PERFCTR_MEM_ERROR);
-ARMV7_EVENT_ATTR(inst_spec, ARMV7_PERFCTR_INSTR_SPEC);
-ARMV7_EVENT_ATTR(ttbr_write_retired, ARMV7_PERFCTR_TTBR_WRITE);
-ARMV7_EVENT_ATTR(bus_cycles, ARMV7_PERFCTR_BUS_CYCLES);
-
-static struct attribute *armv7_pmuv2_event_attrs[] = {
- &armv7_event_attr_sw_incr.attr.attr,
- &armv7_event_attr_l1i_cache_refill.attr.attr,
- &armv7_event_attr_l1i_tlb_refill.attr.attr,
- &armv7_event_attr_l1d_cache_refill.attr.attr,
- &armv7_event_attr_l1d_cache.attr.attr,
- &armv7_event_attr_l1d_tlb_refill.attr.attr,
- &armv7_event_attr_ld_retired.attr.attr,
- &armv7_event_attr_st_retired.attr.attr,
- &armv7_event_attr_inst_retired.attr.attr,
- &armv7_event_attr_exc_taken.attr.attr,
- &armv7_event_attr_exc_return.attr.attr,
- &armv7_event_attr_cid_write_retired.attr.attr,
- &armv7_event_attr_pc_write_retired.attr.attr,
- &armv7_event_attr_br_immed_retired.attr.attr,
- &armv7_event_attr_br_return_retired.attr.attr,
- &armv7_event_attr_unaligned_ldst_retired.attr.attr,
- &armv7_event_attr_br_mis_pred.attr.attr,
- &armv7_event_attr_cpu_cycles.attr.attr,
- &armv7_event_attr_br_pred.attr.attr,
- &armv7_event_attr_mem_access.attr.attr,
- &armv7_event_attr_l1i_cache.attr.attr,
- &armv7_event_attr_l1d_cache_wb.attr.attr,
- &armv7_event_attr_l2d_cache.attr.attr,
- &armv7_event_attr_l2d_cache_refill.attr.attr,
- &armv7_event_attr_l2d_cache_wb.attr.attr,
- &armv7_event_attr_bus_access.attr.attr,
- &armv7_event_attr_memory_error.attr.attr,
- &armv7_event_attr_inst_spec.attr.attr,
- &armv7_event_attr_ttbr_write_retired.attr.attr,
- &armv7_event_attr_bus_cycles.attr.attr,
- NULL,
-};
-
-static struct attribute_group armv7_pmuv2_events_attr_group = {
- .name = "events",
- .attrs = armv7_pmuv2_event_attrs,
-};
-
-/*
- * Perf Events' indices
- */
-#define ARMV7_IDX_CYCLE_COUNTER 0
-#define ARMV7_IDX_COUNTER0 1
-#define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
- (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
-
-#define ARMV7_MAX_COUNTERS 32
-#define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1)
-
-/*
- * ARMv7 low level PMNC access
- */
-
-/*
- * Perf Event to low level counters mapping
- */
-#define ARMV7_IDX_TO_COUNTER(x) \
- (((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
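
A quick worked example of the mapping: perf index 0 is the dedicated cycle
counter and never goes through this macro (it has its own CP15 registers),
while index 1 selects hardware event counter 0:

	u32 counter = ARMV7_IDX_TO_COUNTER(ARMV7_IDX_COUNTER0);	/* == 0 */
	u32 enable_bit = BIT(counter);	/* bit used with PMCNTENSET/PMCNTENCLR */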
-
-/*
- * Per-CPU PMNC: config reg
- */
-#define ARMV7_PMNC_E (1 << 0) /* Enable all counters */
-#define ARMV7_PMNC_P (1 << 1) /* Reset all counters */
-#define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */
-#define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */
-#define ARMV7_PMNC_X (1 << 4) /* Export to ETM */
-#define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug */
-#define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */
-#define ARMV7_PMNC_N_MASK 0x1f
-#define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */
-
-/*
- * FLAG: counters overflow flag status reg
- */
-#define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */
-#define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK
-
-/*
- * PMXEVTYPER: Event selection reg
- */
-#define ARMV7_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
-#define ARMV7_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
-
-/*
- * Event filters for PMUv2
- */
-#define ARMV7_EXCLUDE_PL1 (1 << 31)
-#define ARMV7_EXCLUDE_USER (1 << 30)
-#define ARMV7_INCLUDE_HYP (1 << 27)
-
-/*
- * Secure debug enable reg
- */
-#define ARMV7_SDER_SUNIDEN BIT(1) /* Permit non-invasive debug */
-
-static inline u32 armv7_pmnc_read(void)
-{
- u32 val;
- asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
- return val;
-}
-
-static inline void armv7_pmnc_write(u32 val)
-{
- val &= ARMV7_PMNC_MASK;
- isb();
- asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
-}
-
-static inline int armv7_pmnc_has_overflowed(u32 pmnc)
-{
- return pmnc & ARMV7_OVERFLOWED_MASK;
-}
-
-static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
-{
- return idx >= ARMV7_IDX_CYCLE_COUNTER &&
- idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
-}
-
-static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
-{
- return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
-}
-
-static inline void armv7_pmnc_select_counter(int idx)
-{
- u32 counter = ARMV7_IDX_TO_COUNTER(idx);
- asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
- isb();
-}
-
-static inline u32 armv7pmu_read_counter(struct perf_event *event)
-{
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
- u32 value = 0;
-
- if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
- pr_err("CPU%u reading wrong counter %d\n",
- smp_processor_id(), idx);
- } else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
- asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
- } else {
- armv7_pmnc_select_counter(idx);
- asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
- }
-
- return value;
-}
-
-static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
-{
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
-
- if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
- pr_err("CPU%u writing wrong counter %d\n",
- smp_processor_id(), idx);
- } else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
- asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
- } else {
- armv7_pmnc_select_counter(idx);
- asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
- }
-}
-
-static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
-{
- armv7_pmnc_select_counter(idx);
- val &= ARMV7_EVTYPE_MASK;
- asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
-}
-
-static inline void armv7_pmnc_enable_counter(int idx)
-{
- u32 counter = ARMV7_IDX_TO_COUNTER(idx);
- asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
-}
-
-static inline void armv7_pmnc_disable_counter(int idx)
-{
- u32 counter = ARMV7_IDX_TO_COUNTER(idx);
- asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
-}
-
-static inline void armv7_pmnc_enable_intens(int idx)
-{
- u32 counter = ARMV7_IDX_TO_COUNTER(idx);
- asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
-}
-
-static inline void armv7_pmnc_disable_intens(int idx)
-{
- u32 counter = ARMV7_IDX_TO_COUNTER(idx);
- asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
- isb();
- /* Clear the overflow flag in case an interrupt is pending. */
- asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
- isb();
-}
-
-static inline u32 armv7_pmnc_getreset_flags(void)
-{
- u32 val;
-
- /* Read */
- asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
-
- /* Write to clear flags */
- val &= ARMV7_FLAG_MASK;
- asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
-
- return val;
-}
-
-#ifdef DEBUG
-static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
-{
- u32 val;
- unsigned int cnt;
-
- pr_info("PMNC registers dump:\n");
-
- asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
- pr_info("PMNC =0x%08x\n", val);
-
- asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
- pr_info("CNTENS=0x%08x\n", val);
-
- asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
- pr_info("INTENS=0x%08x\n", val);
-
- asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
- pr_info("FLAGS =0x%08x\n", val);
-
- asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
- pr_info("SELECT=0x%08x\n", val);
-
- asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
- pr_info("CCNT =0x%08x\n", val);
-
- for (cnt = ARMV7_IDX_COUNTER0;
- cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
- armv7_pmnc_select_counter(cnt);
- asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
- pr_info("CNT[%d] count =0x%08x\n",
- ARMV7_IDX_TO_COUNTER(cnt), val);
- asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
- pr_info("CNT[%d] evtsel=0x%08x\n",
- ARMV7_IDX_TO_COUNTER(cnt), val);
- }
-}
-#endif
-
-static void armv7pmu_enable_event(struct perf_event *event)
-{
- unsigned long flags;
- struct hw_perf_event *hwc = &event->hw;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
- int idx = hwc->idx;
-
- if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
- pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
- smp_processor_id(), idx);
- return;
- }
-
- /*
- * Enable counter and interrupt, and set the counter to count
- * the event that we're interested in.
- */
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
-
- /*
- * Disable counter
- */
- armv7_pmnc_disable_counter(idx);
-
- /*
- * Set event (if destined for PMNx counters)
- * We only need to set the event for the cycle counter if we
- * have the ability to perform event filtering.
- */
- if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
- armv7_pmnc_write_evtsel(idx, hwc->config_base);
-
- /*
- * Enable interrupt for this counter
- */
- armv7_pmnc_enable_intens(idx);
-
- /*
- * Enable counter
- */
- armv7_pmnc_enable_counter(idx);
-
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
-static void armv7pmu_disable_event(struct perf_event *event)
-{
- unsigned long flags;
- struct hw_perf_event *hwc = &event->hw;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
- int idx = hwc->idx;
-
- if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
- pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
- smp_processor_id(), idx);
- return;
- }
-
- /*
- * Disable counter and interrupt
- */
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
-
- /*
- * Disable counter
- */
- armv7_pmnc_disable_counter(idx);
-
- /*
- * Disable interrupt for this counter
- */
- armv7_pmnc_disable_intens(idx);
-
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
-static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
-{
- u32 pmnc;
- struct perf_sample_data data;
- struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
- struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
- struct pt_regs *regs;
- int idx;
-
- /*
- * Get and reset the IRQ flags
- */
- pmnc = armv7_pmnc_getreset_flags();
-
- /*
- * Did an overflow occur?
- */
- if (!armv7_pmnc_has_overflowed(pmnc))
- return IRQ_NONE;
-
- /*
- * Handle the counter(s) overflow(s)
- */
- regs = get_irq_regs();
-
- for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
- struct perf_event *event = cpuc->events[idx];
- struct hw_perf_event *hwc;
-
- /* Ignore if we don't have an event. */
- if (!event)
- continue;
-
- /*
- * We have a single interrupt for all counters. Check that
- * each counter has overflowed before we process it.
- */
- if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
- continue;
-
- hwc = &event->hw;
- armpmu_event_update(event);
- perf_sample_data_init(&data, 0, hwc->last_period);
- if (!armpmu_event_set_period(event))
- continue;
-
- if (perf_event_overflow(event, &data, regs))
- cpu_pmu->disable(event);
- }
-
- /*
- * Handle the pending perf events.
- *
- * Note: this call *must* be run with interrupts disabled. For
- * platforms that can have the PMU interrupts raised as an NMI, this
- * will not work.
- */
- irq_work_run();
-
- return IRQ_HANDLED;
-}
-
-static void armv7pmu_start(struct arm_pmu *cpu_pmu)
-{
- unsigned long flags;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- /* Enable all counters */
- armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
-static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
-{
- unsigned long flags;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- /* Disable all counters */
- armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
-static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
- struct perf_event *event)
-{
- int idx;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct hw_perf_event *hwc = &event->hw;
- unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;
-
- /* Always place a cycle counter into the cycle counter. */
- if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
- if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
- return -EAGAIN;
-
- return ARMV7_IDX_CYCLE_COUNTER;
- }
-
- /*
- * For anything other than a cycle counter, try and use
- * the events counters
- */
- for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
- if (!test_and_set_bit(idx, cpuc->used_mask))
- return idx;
- }
-
- /* The counters are all in use. */
- return -EAGAIN;
-}
-
-/*
- * Add an event filter to a given event. This will only work for PMUv2 PMUs.
- */
-static int armv7pmu_set_event_filter(struct hw_perf_event *event,
- struct perf_event_attr *attr)
-{
- unsigned long config_base = 0;
-
- if (attr->exclude_idle)
- return -EPERM;
- if (attr->exclude_user)
- config_base |= ARMV7_EXCLUDE_USER;
- if (attr->exclude_kernel)
- config_base |= ARMV7_EXCLUDE_PL1;
- if (!attr->exclude_hv)
- config_base |= ARMV7_INCLUDE_HYP;
-
- /*
- * Install the filter into config_base as this is used to
- * construct the event type.
- */
- event->config_base = config_base;
-
- return 0;
-}
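
A worked example with hypothetical attribute values (not taken from this
patch): opening a hardware event with only exclude_user set leaves
ARMV7_EXCLUDE_USER | ARMV7_INCLUDE_HYP in config_base, which
armv7pmu_enable_event() later merges into PMXEVTYPER via
armv7_pmnc_write_evtsel():

	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.exclude_user	= 1,	/* count PL1 and above only */
	};
	/*
	 * armv7pmu_set_event_filter(&hwc, &attr) then yields
	 * hwc.config_base == ARMV7_EXCLUDE_USER | ARMV7_INCLUDE_HYP.
	 */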
-
-static void armv7pmu_reset(void *info)
-{
- struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
- u32 idx, nb_cnt = cpu_pmu->num_events, val;
-
- if (cpu_pmu->secure_access) {
- asm volatile("mrc p15, 0, %0, c1, c1, 1" : "=r" (val));
- val |= ARMV7_SDER_SUNIDEN;
- asm volatile("mcr p15, 0, %0, c1, c1, 1" : : "r" (val));
- }
-
- /* The counter and interrupt enable registers are unknown at reset. */
- for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
- armv7_pmnc_disable_counter(idx);
- armv7_pmnc_disable_intens(idx);
- }
-
- /* Initialize & Reset PMNC: C and P bits */
- armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
-}
-
-static int armv7_a8_map_event(struct perf_event *event)
-{
- return armpmu_map_event(event, &armv7_a8_perf_map,
- &armv7_a8_perf_cache_map, 0xFF);
-}
-
-static int armv7_a9_map_event(struct perf_event *event)
-{
- return armpmu_map_event(event, &armv7_a9_perf_map,
- &armv7_a9_perf_cache_map, 0xFF);
-}
-
-static int armv7_a5_map_event(struct perf_event *event)
-{
- return armpmu_map_event(event, &armv7_a5_perf_map,
- &armv7_a5_perf_cache_map, 0xFF);
-}
-
-static int armv7_a15_map_event(struct perf_event *event)
-{
- return armpmu_map_event(event, &armv7_a15_perf_map,
- &armv7_a15_perf_cache_map, 0xFF);
-}
-
-static int armv7_a7_map_event(struct perf_event *event)
-{
- return armpmu_map_event(event, &armv7_a7_perf_map,
- &armv7_a7_perf_cache_map, 0xFF);
-}
-
-static int armv7_a12_map_event(struct perf_event *event)
-{
- return armpmu_map_event(event, &armv7_a12_perf_map,
- &armv7_a12_perf_cache_map, 0xFF);
-}
-
-static int krait_map_event(struct perf_event *event)
-{
- return armpmu_map_event(event, &krait_perf_map,
- &krait_perf_cache_map, 0xFFFFF);
-}
-
-static int krait_map_event_no_branch(struct perf_event *event)
-{
- return armpmu_map_event(event, &krait_perf_map_no_branch,
- &krait_perf_cache_map, 0xFFFFF);
-}
-
-static int scorpion_map_event(struct perf_event *event)
-{
- return armpmu_map_event(event, &scorpion_perf_map,
- &scorpion_perf_cache_map, 0xFFFFF);
-}
-
-static void armv7pmu_init(struct arm_pmu *cpu_pmu)
-{
- cpu_pmu->handle_irq = armv7pmu_handle_irq;
- cpu_pmu->enable = armv7pmu_enable_event;
- cpu_pmu->disable = armv7pmu_disable_event;
- cpu_pmu->read_counter = armv7pmu_read_counter;
- cpu_pmu->write_counter = armv7pmu_write_counter;
- cpu_pmu->get_event_idx = armv7pmu_get_event_idx;
- cpu_pmu->start = armv7pmu_start;
- cpu_pmu->stop = armv7pmu_stop;
- cpu_pmu->reset = armv7pmu_reset;
- cpu_pmu->max_period = (1LLU << 32) - 1;
-}
-
-static void armv7_read_num_pmnc_events(void *info)
-{
- int *nb_cnt = info;
-
-	/* Read the number of CNTx counters supported from PMNC */
- *nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
-
- /* Add the CPU cycles counter */
- *nb_cnt += 1;
-}
-
-static int armv7_probe_num_events(struct arm_pmu *arm_pmu)
-{
- return smp_call_function_any(&arm_pmu->supported_cpus,
- armv7_read_num_pmnc_events,
- &arm_pmu->num_events, 1);
-}
-
-static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
-{
- armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "armv7_cortex_a8";
- cpu_pmu->map_event = armv7_a8_map_event;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
- &armv7_pmuv1_events_attr_group;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
- &armv7_pmu_format_attr_group;
- return armv7_probe_num_events(cpu_pmu);
-}
-
-static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
-{
- armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "armv7_cortex_a9";
- cpu_pmu->map_event = armv7_a9_map_event;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
- &armv7_pmuv1_events_attr_group;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
- &armv7_pmu_format_attr_group;
- return armv7_probe_num_events(cpu_pmu);
-}
-
-static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
-{
- armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "armv7_cortex_a5";
- cpu_pmu->map_event = armv7_a5_map_event;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
- &armv7_pmuv1_events_attr_group;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
- &armv7_pmu_format_attr_group;
- return armv7_probe_num_events(cpu_pmu);
-}
-
-static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
-{
- armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "armv7_cortex_a15";
- cpu_pmu->map_event = armv7_a15_map_event;
- cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
- &armv7_pmuv2_events_attr_group;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
- &armv7_pmu_format_attr_group;
- return armv7_probe_num_events(cpu_pmu);
-}
-
-static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
-{
- armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "armv7_cortex_a7";
- cpu_pmu->map_event = armv7_a7_map_event;
- cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
- &armv7_pmuv2_events_attr_group;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
- &armv7_pmu_format_attr_group;
- return armv7_probe_num_events(cpu_pmu);
-}
-
-static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
-{
- armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "armv7_cortex_a12";
- cpu_pmu->map_event = armv7_a12_map_event;
- cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
- &armv7_pmuv2_events_attr_group;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
- &armv7_pmu_format_attr_group;
- return armv7_probe_num_events(cpu_pmu);
-}
-
-static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
-{
- int ret = armv7_a12_pmu_init(cpu_pmu);
- cpu_pmu->name = "armv7_cortex_a17";
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
- &armv7_pmuv2_events_attr_group;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
- &armv7_pmu_format_attr_group;
- return ret;
-}
-
-/*
- * Krait Performance Monitor Region Event Selection Register (PMRESRn)
- *
- * 31 30 24 16 8 0
- * +--------------------------------+
- * PMRESR0 | EN | CC | CC | CC | CC | N = 1, R = 0
- * +--------------------------------+
- * PMRESR1 | EN | CC | CC | CC | CC | N = 1, R = 1
- * +--------------------------------+
- * PMRESR2 | EN | CC | CC | CC | CC | N = 1, R = 2
- * +--------------------------------+
- * VPMRESR0 | EN | CC | CC | CC | CC | N = 2, R = ?
- * +--------------------------------+
- * EN | G=3 | G=2 | G=1 | G=0
- *
- * Event Encoding:
- *
- * hwc->config_base = 0xNRCCG
- *
- * N = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR)
- * R = region register
- * CC = class of events the group G is choosing from
- * G = group or particular event
- *
- * Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
- *
- * A region (R) corresponds to a piece of the CPU (execution unit, instruction
- * unit, etc.) while the event code (CC) corresponds to a particular class of
- * events (interrupts for example). An event code is broken down into
- * groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
- * example).
- */
-
-#define KRAIT_EVENT (1 << 16)
-#define VENUM_EVENT (2 << 16)
-#define KRAIT_EVENT_MASK (KRAIT_EVENT | VENUM_EVENT)
-#define PMRESRn_EN BIT(31)
-
-#define EVENT_REGION(event) (((event) >> 12) & 0xf) /* R */
-#define EVENT_GROUP(event) ((event) & 0xf) /* G */
-#define EVENT_CODE(event) (((event) >> 4) & 0xff) /* CC */
-#define EVENT_VENUM(event) (!!(event & VENUM_EVENT)) /* N=2 */
-#define EVENT_CPU(event) (!!(event & KRAIT_EVENT)) /* N=1 */
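-
-/*
- * Worked example (editorial, not part of the original file): decoding
- * the 0x12021 event from the comment above with these macros:
- *
- *   EVENT_CPU(0x12021)    -> true  (N = 1: Krait CPU event)
- *   EVENT_VENUM(0x12021)  -> false
- *   EVENT_REGION(0x12021) -> 2     (R:  PMRESR2)
- *   EVENT_CODE(0x12021)   -> 0x02  (CC: event class 2)
- *   EVENT_GROUP(0x12021)  -> 1     (G:  group 1)
- */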
-
-static u32 krait_read_pmresrn(int n)
-{
- u32 val;
-
- switch (n) {
- case 0:
- asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
- break;
- case 1:
- asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
- break;
- case 2:
- asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
- break;
- default:
- BUG(); /* Should be validated in krait_pmu_get_event_idx() */
- }
-
- return val;
-}
-
-static void krait_write_pmresrn(int n, u32 val)
-{
- switch (n) {
- case 0:
- asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
- break;
- case 1:
- asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
- break;
- case 2:
- asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
- break;
- default:
- BUG(); /* Should be validated in krait_pmu_get_event_idx() */
- }
-}
-
-static u32 venum_read_pmresr(void)
-{
- u32 val;
- asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
- return val;
-}
-
-static void venum_write_pmresr(u32 val)
-{
- asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
-}
-
-static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val)
-{
- u32 venum_new_val;
- u32 fp_new_val;
-
- BUG_ON(preemptible());
- /* CPACR Enable CP10 and CP11 access */
- *venum_orig_val = get_copro_access();
- venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11);
- set_copro_access(venum_new_val);
-
- /* Enable FPEXC */
- *fp_orig_val = fmrx(FPEXC);
- fp_new_val = *fp_orig_val | FPEXC_EN;
- fmxr(FPEXC, fp_new_val);
-}
-
-static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val)
-{
- BUG_ON(preemptible());
- /* Restore FPEXC */
- fmxr(FPEXC, fp_orig_val);
- isb();
- /* Restore CPACR */
- set_copro_access(venum_orig_val);
-}
-
-static u32 krait_get_pmresrn_event(unsigned int region)
-{
- static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0,
- KRAIT_PMRESR1_GROUP0,
- KRAIT_PMRESR2_GROUP0 };
- return pmresrn_table[region];
-}
-
-static void krait_evt_setup(int idx, u32 config_base)
-{
- u32 val;
- u32 mask;
- u32 vval, fval;
- unsigned int region = EVENT_REGION(config_base);
- unsigned int group = EVENT_GROUP(config_base);
- unsigned int code = EVENT_CODE(config_base);
- unsigned int group_shift;
- bool venum_event = EVENT_VENUM(config_base);
-
- group_shift = group * 8;
- mask = 0xff << group_shift;
-
- /* Configure evtsel for the region and group */
- if (venum_event)
- val = KRAIT_VPMRESR0_GROUP0;
- else
- val = krait_get_pmresrn_event(region);
- val += group;
- /* Mix in mode-exclusion bits */
- val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
- armv7_pmnc_write_evtsel(idx, val);
-
- if (venum_event) {
- venum_pre_pmresr(&vval, &fval);
- val = venum_read_pmresr();
- val &= ~mask;
- val |= code << group_shift;
- val |= PMRESRn_EN;
- venum_write_pmresr(val);
- venum_post_pmresr(vval, fval);
- } else {
- val = krait_read_pmresrn(region);
- val &= ~mask;
- val |= code << group_shift;
- val |= PMRESRn_EN;
- krait_write_pmresrn(region, val);
- }
-}
-
-static u32 clear_pmresrn_group(u32 val, int group)
-{
- u32 mask;
- int group_shift;
-
- group_shift = group * 8;
- mask = 0xff << group_shift;
- val &= ~mask;
-
-	/* Keep the enable bit if any other group in the region is still in use */
- if (val & ~PMRESRn_EN)
- return val |= PMRESRn_EN;
-
- return 0;
-}
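-
-/*
- * Editorial example: for val = PMRESRn_EN | (0x21 << 8) and group = 1,
- * clearing bits 15:8 leaves only the EN bit set, so no other group is
- * in use and 0 is returned.  Had another group still been programmed,
- * the EN bit would have been preserved.
- */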
-
-static void krait_clearpmu(u32 config_base)
-{
- u32 val;
- u32 vval, fval;
- unsigned int region = EVENT_REGION(config_base);
- unsigned int group = EVENT_GROUP(config_base);
- bool venum_event = EVENT_VENUM(config_base);
-
- if (venum_event) {
- venum_pre_pmresr(&vval, &fval);
- val = venum_read_pmresr();
- val = clear_pmresrn_group(val, group);
- venum_write_pmresr(val);
- venum_post_pmresr(vval, fval);
- } else {
- val = krait_read_pmresrn(region);
- val = clear_pmresrn_group(val, group);
- krait_write_pmresrn(region, val);
- }
-}
-
-static void krait_pmu_disable_event(struct perf_event *event)
-{
- unsigned long flags;
- struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
- /* Disable counter and interrupt */
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
-
- /* Disable counter */
- armv7_pmnc_disable_counter(idx);
-
- /*
- * Clear pmresr code (if destined for PMNx counters)
- */
- if (hwc->config_base & KRAIT_EVENT_MASK)
- krait_clearpmu(hwc->config_base);
-
- /* Disable interrupt for this counter */
- armv7_pmnc_disable_intens(idx);
-
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
-static void krait_pmu_enable_event(struct perf_event *event)
-{
- unsigned long flags;
- struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
- /*
- * Enable counter and interrupt, and set the counter to count
- * the event that we're interested in.
- */
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
-
- /* Disable counter */
- armv7_pmnc_disable_counter(idx);
-
- /*
-	 * Set the event (if destined for PMNx counters).
-	 * We also set the event for the cycle counter because Krait
-	 * has the ability to perform event filtering there.
- */
- if (hwc->config_base & KRAIT_EVENT_MASK)
- krait_evt_setup(idx, hwc->config_base);
- else
- armv7_pmnc_write_evtsel(idx, hwc->config_base);
-
- /* Enable interrupt for this counter */
- armv7_pmnc_enable_intens(idx);
-
- /* Enable counter */
- armv7_pmnc_enable_counter(idx);
-
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
-static void krait_pmu_reset(void *info)
-{
- u32 vval, fval;
- struct arm_pmu *cpu_pmu = info;
- u32 idx, nb_cnt = cpu_pmu->num_events;
-
- armv7pmu_reset(info);
-
- /* Clear all pmresrs */
- krait_write_pmresrn(0, 0);
- krait_write_pmresrn(1, 0);
- krait_write_pmresrn(2, 0);
-
- venum_pre_pmresr(&vval, &fval);
- venum_write_pmresr(0);
- venum_post_pmresr(vval, fval);
-
-	/* Reset PMxEVCNTCR to a sane default */
- for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
- armv7_pmnc_select_counter(idx);
- asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
-	}
-}
-
-static int krait_event_to_bit(struct perf_event *event, unsigned int region,
- unsigned int group)
-{
- int bit;
- struct hw_perf_event *hwc = &event->hw;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
-
- if (hwc->config_base & VENUM_EVENT)
- bit = KRAIT_VPMRESR0_GROUP0;
- else
- bit = krait_get_pmresrn_event(region);
- bit -= krait_get_pmresrn_event(0);
- bit += group;
- /*
- * Lower bits are reserved for use by the counters (see
- * armv7pmu_get_event_idx() for more info)
- */
- bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
-
- return bit;
-}
-
-/*
- * We check for column exclusion constraints here.
- * Two events can't use the same group within a PMRESR register.
- */
-static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
- struct perf_event *event)
-{
- int idx;
- int bit = -1;
- struct hw_perf_event *hwc = &event->hw;
- unsigned int region = EVENT_REGION(hwc->config_base);
- unsigned int code = EVENT_CODE(hwc->config_base);
- unsigned int group = EVENT_GROUP(hwc->config_base);
- bool venum_event = EVENT_VENUM(hwc->config_base);
- bool krait_event = EVENT_CPU(hwc->config_base);
-
- if (venum_event || krait_event) {
-		/* Reject invalid events */
- if (group > 3 || region > 2)
- return -EINVAL;
- if (venum_event && (code & 0xe0))
- return -EINVAL;
-
- bit = krait_event_to_bit(event, region, group);
- if (test_and_set_bit(bit, cpuc->used_mask))
- return -EAGAIN;
- }
-
- idx = armv7pmu_get_event_idx(cpuc, event);
- if (idx < 0 && bit >= 0)
- clear_bit(bit, cpuc->used_mask);
-
- return idx;
-}
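-
-/*
- * Editorial sketch: column exclusion in practice.  If two hypothetical
- * events are both encoded as 0x12021 (PMRESR2, group 1), the first
- * caller claims the bit via test_and_set_bit() above and the second
- * gets -EAGAIN, even though free PMNx counters may remain.
- */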
-
-static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
- struct perf_event *event)
-{
- int bit;
- struct hw_perf_event *hwc = &event->hw;
- unsigned int region = EVENT_REGION(hwc->config_base);
- unsigned int group = EVENT_GROUP(hwc->config_base);
- bool venum_event = EVENT_VENUM(hwc->config_base);
- bool krait_event = EVENT_CPU(hwc->config_base);
-
- if (venum_event || krait_event) {
- bit = krait_event_to_bit(event, region, group);
- clear_bit(bit, cpuc->used_mask);
- }
-}
-
-static int krait_pmu_init(struct arm_pmu *cpu_pmu)
-{
- armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "armv7_krait";
- /* Some early versions of Krait don't support PC write events */
- if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
- "qcom,no-pc-write"))
- cpu_pmu->map_event = krait_map_event_no_branch;
- else
- cpu_pmu->map_event = krait_map_event;
- cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
- cpu_pmu->reset = krait_pmu_reset;
- cpu_pmu->enable = krait_pmu_enable_event;
- cpu_pmu->disable = krait_pmu_disable_event;
- cpu_pmu->get_event_idx = krait_pmu_get_event_idx;
- cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
- return armv7_probe_num_events(cpu_pmu);
-}
-
-/*
- * Scorpion Local Performance Monitor Register (LPMn)
- *
- * 31 30 24 16 8 0
- * +--------------------------------+
- * LPM0 | EN | CC | CC | CC | CC | N = 1, R = 0
- * +--------------------------------+
- * LPM1 | EN | CC | CC | CC | CC | N = 1, R = 1
- * +--------------------------------+
- * LPM2 | EN | CC | CC | CC | CC | N = 1, R = 2
- * +--------------------------------+
- * L2LPM | EN | CC | CC | CC | CC | N = 1, R = 3
- * +--------------------------------+
- * VLPM | EN | CC | CC | CC | CC | N = 2, R = ?
- * +--------------------------------+
- * EN | G=3 | G=2 | G=1 | G=0
- *
- *
- * Event Encoding:
- *
- * hwc->config_base = 0xNRCCG
- *
- * N = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM)
- * R = region register
- * CC = class of events the group G is choosing from
- * G = group or particular event
- *
- * Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2
- *
- * A region (R) corresponds to a piece of the CPU (execution unit, instruction
- * unit, etc.) while the event code (CC) corresponds to a particular class of
- * events (interrupts for example). An event code is broken down into
- * groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
- * example).
- */
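-
-/*
- * Worked example (editorial, not part of the original file): 0x13021
- * would be a Scorpion CPU event in the L2LPM register, group 1, code 2:
- *
- *   EVENT_CPU(0x13021)    -> true  (N = 1)
- *   EVENT_REGION(0x13021) -> 3     (R:  L2LPM)
- *   EVENT_CODE(0x13021)   -> 0x02  (CC)
- *   EVENT_GROUP(0x13021)  -> 1     (G)
- */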
-
-static u32 scorpion_read_pmresrn(int n)
-{
- u32 val;
-
- switch (n) {
- case 0:
- asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
- break;
- case 1:
- asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
- break;
- case 2:
- asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
- break;
- case 3:
- asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
- break;
- default:
- BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
- }
-
- return val;
-}
-
-static void scorpion_write_pmresrn(int n, u32 val)
-{
- switch (n) {
- case 0:
- asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
- break;
- case 1:
- asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
- break;
- case 2:
- asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
- break;
- case 3:
- asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
- break;
- default:
- BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
- }
-}
-
-static u32 scorpion_get_pmresrn_event(unsigned int region)
-{
- static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0,
- SCORPION_LPM1_GROUP0,
- SCORPION_LPM2_GROUP0,
- SCORPION_L2LPM_GROUP0 };
- return pmresrn_table[region];
-}
-
-static void scorpion_evt_setup(int idx, u32 config_base)
-{
- u32 val;
- u32 mask;
- u32 vval, fval;
- unsigned int region = EVENT_REGION(config_base);
- unsigned int group = EVENT_GROUP(config_base);
- unsigned int code = EVENT_CODE(config_base);
- unsigned int group_shift;
- bool venum_event = EVENT_VENUM(config_base);
-
- group_shift = group * 8;
- mask = 0xff << group_shift;
-
- /* Configure evtsel for the region and group */
- if (venum_event)
- val = SCORPION_VLPM_GROUP0;
- else
- val = scorpion_get_pmresrn_event(region);
- val += group;
- /* Mix in mode-exclusion bits */
- val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
- armv7_pmnc_write_evtsel(idx, val);
-
- asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
-
- if (venum_event) {
- venum_pre_pmresr(&vval, &fval);
- val = venum_read_pmresr();
- val &= ~mask;
- val |= code << group_shift;
- val |= PMRESRn_EN;
- venum_write_pmresr(val);
- venum_post_pmresr(vval, fval);
- } else {
- val = scorpion_read_pmresrn(region);
- val &= ~mask;
- val |= code << group_shift;
- val |= PMRESRn_EN;
- scorpion_write_pmresrn(region, val);
- }
-}
-
-static void scorpion_clearpmu(u32 config_base)
-{
- u32 val;
- u32 vval, fval;
- unsigned int region = EVENT_REGION(config_base);
- unsigned int group = EVENT_GROUP(config_base);
- bool venum_event = EVENT_VENUM(config_base);
-
- if (venum_event) {
- venum_pre_pmresr(&vval, &fval);
- val = venum_read_pmresr();
- val = clear_pmresrn_group(val, group);
- venum_write_pmresr(val);
- venum_post_pmresr(vval, fval);
- } else {
- val = scorpion_read_pmresrn(region);
- val = clear_pmresrn_group(val, group);
- scorpion_write_pmresrn(region, val);
- }
-}
-
-static void scorpion_pmu_disable_event(struct perf_event *event)
-{
- unsigned long flags;
- struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
- /* Disable counter and interrupt */
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
-
- /* Disable counter */
- armv7_pmnc_disable_counter(idx);
-
- /*
- * Clear pmresr code (if destined for PMNx counters)
- */
- if (hwc->config_base & KRAIT_EVENT_MASK)
- scorpion_clearpmu(hwc->config_base);
-
- /* Disable interrupt for this counter */
- armv7_pmnc_disable_intens(idx);
-
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
-static void scorpion_pmu_enable_event(struct perf_event *event)
-{
- unsigned long flags;
- struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
- /*
- * Enable counter and interrupt, and set the counter to count
- * the event that we're interested in.
- */
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
-
- /* Disable counter */
- armv7_pmnc_disable_counter(idx);
-
- /*
-	 * Set the event (if destined for PMNx counters).
-	 * We don't set the event for the cycle counter because
-	 * Scorpion lacks the ability to perform event filtering there.
- */
- if (hwc->config_base & KRAIT_EVENT_MASK)
- scorpion_evt_setup(idx, hwc->config_base);
- else if (idx != ARMV7_IDX_CYCLE_COUNTER)
- armv7_pmnc_write_evtsel(idx, hwc->config_base);
-
- /* Enable interrupt for this counter */
- armv7_pmnc_enable_intens(idx);
-
- /* Enable counter */
- armv7_pmnc_enable_counter(idx);
-
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
-static void scorpion_pmu_reset(void *info)
-{
- u32 vval, fval;
- struct arm_pmu *cpu_pmu = info;
- u32 idx, nb_cnt = cpu_pmu->num_events;
-
- armv7pmu_reset(info);
-
- /* Clear all pmresrs */
- scorpion_write_pmresrn(0, 0);
- scorpion_write_pmresrn(1, 0);
- scorpion_write_pmresrn(2, 0);
- scorpion_write_pmresrn(3, 0);
-
- venum_pre_pmresr(&vval, &fval);
- venum_write_pmresr(0);
- venum_post_pmresr(vval, fval);
-
-	/* Reset PMxEVCNTCR to a sane default */
- for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
- armv7_pmnc_select_counter(idx);
- asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
- }
-}
-
-static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
- unsigned int group)
-{
- int bit;
- struct hw_perf_event *hwc = &event->hw;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
-
- if (hwc->config_base & VENUM_EVENT)
- bit = SCORPION_VLPM_GROUP0;
- else
- bit = scorpion_get_pmresrn_event(region);
- bit -= scorpion_get_pmresrn_event(0);
- bit += group;
- /*
- * Lower bits are reserved for use by the counters (see
- * armv7pmu_get_event_idx() for more info)
- */
- bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
-
- return bit;
-}
-
-/*
- * We check for column exclusion constraints here.
- * Two events can't use the same group within a PMRESR register.
- */
-static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc,
- struct perf_event *event)
-{
- int idx;
- int bit = -1;
- struct hw_perf_event *hwc = &event->hw;
- unsigned int region = EVENT_REGION(hwc->config_base);
- unsigned int group = EVENT_GROUP(hwc->config_base);
- bool venum_event = EVENT_VENUM(hwc->config_base);
- bool scorpion_event = EVENT_CPU(hwc->config_base);
-
- if (venum_event || scorpion_event) {
-		/* Reject invalid events */
- if (group > 3 || region > 3)
- return -EINVAL;
-
- bit = scorpion_event_to_bit(event, region, group);
- if (test_and_set_bit(bit, cpuc->used_mask))
- return -EAGAIN;
- }
-
- idx = armv7pmu_get_event_idx(cpuc, event);
- if (idx < 0 && bit >= 0)
- clear_bit(bit, cpuc->used_mask);
-
- return idx;
-}
-
-static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
- struct perf_event *event)
-{
- int bit;
- struct hw_perf_event *hwc = &event->hw;
- unsigned int region = EVENT_REGION(hwc->config_base);
- unsigned int group = EVENT_GROUP(hwc->config_base);
- bool venum_event = EVENT_VENUM(hwc->config_base);
- bool scorpion_event = EVENT_CPU(hwc->config_base);
-
- if (venum_event || scorpion_event) {
- bit = scorpion_event_to_bit(event, region, group);
- clear_bit(bit, cpuc->used_mask);
- }
-}
-
-static int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
-{
- armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "armv7_scorpion";
- cpu_pmu->map_event = scorpion_map_event;
- cpu_pmu->reset = scorpion_pmu_reset;
- cpu_pmu->enable = scorpion_pmu_enable_event;
- cpu_pmu->disable = scorpion_pmu_disable_event;
- cpu_pmu->get_event_idx = scorpion_pmu_get_event_idx;
- cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
- return armv7_probe_num_events(cpu_pmu);
-}
-
-static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
-{
- armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "armv7_scorpion_mp";
- cpu_pmu->map_event = scorpion_map_event;
- cpu_pmu->reset = scorpion_pmu_reset;
- cpu_pmu->enable = scorpion_pmu_enable_event;
- cpu_pmu->disable = scorpion_pmu_disable_event;
- cpu_pmu->get_event_idx = scorpion_pmu_get_event_idx;
- cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
- return armv7_probe_num_events(cpu_pmu);
-}
-
-static const struct of_device_id armv7_pmu_of_device_ids[] = {
- {.compatible = "arm,cortex-a17-pmu", .data = armv7_a17_pmu_init},
- {.compatible = "arm,cortex-a15-pmu", .data = armv7_a15_pmu_init},
- {.compatible = "arm,cortex-a12-pmu", .data = armv7_a12_pmu_init},
- {.compatible = "arm,cortex-a9-pmu", .data = armv7_a9_pmu_init},
- {.compatible = "arm,cortex-a8-pmu", .data = armv7_a8_pmu_init},
- {.compatible = "arm,cortex-a7-pmu", .data = armv7_a7_pmu_init},
- {.compatible = "arm,cortex-a5-pmu", .data = armv7_a5_pmu_init},
- {.compatible = "qcom,krait-pmu", .data = krait_pmu_init},
- {.compatible = "qcom,scorpion-pmu", .data = scorpion_pmu_init},
- {.compatible = "qcom,scorpion-mp-pmu", .data = scorpion_mp_pmu_init},
- {},
-};
-
-static const struct pmu_probe_info armv7_pmu_probe_table[] = {
- ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A8, armv7_a8_pmu_init),
- ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, armv7_a9_pmu_init),
- { /* sentinel value */ }
-};
-
-static int armv7_pmu_device_probe(struct platform_device *pdev)
-{
- return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids,
- armv7_pmu_probe_table);
-}
-
-static struct platform_driver armv7_pmu_driver = {
- .driver = {
- .name = "armv7-pmu",
- .of_match_table = armv7_pmu_of_device_ids,
- },
- .probe = armv7_pmu_device_probe,
-};
-
-builtin_platform_driver(armv7_pmu_driver);
-#endif /* CONFIG_CPU_V7 */
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
deleted file mode 100644
index 0e51f5e4f879..000000000000
--- a/arch/arm/kernel/perf_event_xscale.c
+++ /dev/null
@@ -1,771 +0,0 @@
-/*
- * ARMv5 [xscale] Performance counter handling code.
- *
- * Copyright (C) 2010, ARM Ltd., Will Deacon <will.deacon@arm.com>
- *
- * Based on the previous xscale OProfile code.
- *
- * There are two variants of the xscale PMU that we support:
- * - xscale1pmu: 2 event counters and a cycle counter
- * - xscale2pmu: 4 event counters and a cycle counter
- * The two variants share event definitions, but have different
- * PMU structures.
- */
-
-#ifdef CONFIG_CPU_XSCALE
-
-#include <asm/cputype.h>
-#include <asm/irq_regs.h>
-
-#include <linux/of.h>
-#include <linux/perf/arm_pmu.h>
-#include <linux/platform_device.h>
-
-enum xscale_perf_types {
- XSCALE_PERFCTR_ICACHE_MISS = 0x00,
- XSCALE_PERFCTR_ICACHE_NO_DELIVER = 0x01,
- XSCALE_PERFCTR_DATA_STALL = 0x02,
- XSCALE_PERFCTR_ITLB_MISS = 0x03,
- XSCALE_PERFCTR_DTLB_MISS = 0x04,
- XSCALE_PERFCTR_BRANCH = 0x05,
- XSCALE_PERFCTR_BRANCH_MISS = 0x06,
- XSCALE_PERFCTR_INSTRUCTION = 0x07,
- XSCALE_PERFCTR_DCACHE_FULL_STALL = 0x08,
- XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG = 0x09,
- XSCALE_PERFCTR_DCACHE_ACCESS = 0x0A,
- XSCALE_PERFCTR_DCACHE_MISS = 0x0B,
- XSCALE_PERFCTR_DCACHE_WRITE_BACK = 0x0C,
- XSCALE_PERFCTR_PC_CHANGED = 0x0D,
- XSCALE_PERFCTR_BCU_REQUEST = 0x10,
- XSCALE_PERFCTR_BCU_FULL = 0x11,
- XSCALE_PERFCTR_BCU_DRAIN = 0x12,
- XSCALE_PERFCTR_BCU_ECC_NO_ELOG = 0x14,
- XSCALE_PERFCTR_BCU_1_BIT_ERR = 0x15,
- XSCALE_PERFCTR_RMW = 0x16,
- /* XSCALE_PERFCTR_CCNT is not hardware defined */
- XSCALE_PERFCTR_CCNT = 0xFE,
- XSCALE_PERFCTR_UNUSED = 0xFF,
-};
-
-enum xscale_counters {
- XSCALE_CYCLE_COUNTER = 0,
- XSCALE_COUNTER0,
- XSCALE_COUNTER1,
- XSCALE_COUNTER2,
- XSCALE_COUNTER3,
-};
-
-static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
- PERF_MAP_ALL_UNSUPPORTED,
- [PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT,
- [PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
- [PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS,
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = XSCALE_PERFCTR_ICACHE_NO_DELIVER,
-};
-
-static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- PERF_CACHE_MAP_ALL_UNSUPPORTED,
-
- [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
- [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
- [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
- [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
-
- [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS,
-
- [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
- [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
-
- [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
- [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
-};
-
-#define XSCALE_PMU_ENABLE 0x001
-#define XSCALE_PMN_RESET 0x002
-#define XSCALE_CCNT_RESET 0x004
-#define XSCALE_PMU_RESET	(XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
-#define XSCALE_PMU_CNT64 0x008
-
-#define XSCALE1_OVERFLOWED_MASK 0x700
-#define XSCALE1_CCOUNT_OVERFLOW 0x400
-#define XSCALE1_COUNT0_OVERFLOW 0x100
-#define XSCALE1_COUNT1_OVERFLOW 0x200
-#define XSCALE1_CCOUNT_INT_EN 0x040
-#define XSCALE1_COUNT0_INT_EN 0x010
-#define XSCALE1_COUNT1_INT_EN 0x020
-#define XSCALE1_COUNT0_EVT_SHFT 12
-#define XSCALE1_COUNT0_EVT_MASK (0xff << XSCALE1_COUNT0_EVT_SHFT)
-#define XSCALE1_COUNT1_EVT_SHFT 20
-#define XSCALE1_COUNT1_EVT_MASK (0xff << XSCALE1_COUNT1_EVT_SHFT)
-
-static inline u32
-xscale1pmu_read_pmnc(void)
-{
- u32 val;
- asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
- return val;
-}
-
-static inline void
-xscale1pmu_write_pmnc(u32 val)
-{
-	/* the upper 4 bits and bits 7 and 11 are write-as-0 */
- val &= 0xffff77f;
- asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
-}
-
-static inline int
-xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
- enum xscale_counters counter)
-{
- int ret = 0;
-
- switch (counter) {
- case XSCALE_CYCLE_COUNTER:
- ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
- break;
- case XSCALE_COUNTER0:
- ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
- break;
- case XSCALE_COUNTER1:
- ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
- break;
- default:
- WARN_ONCE(1, "invalid counter number (%d)\n", counter);
- }
-
- return ret;
-}
-
-static irqreturn_t
-xscale1pmu_handle_irq(int irq_num, void *dev)
-{
- unsigned long pmnc;
- struct perf_sample_data data;
- struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
- struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
- struct pt_regs *regs;
- int idx;
-
- /*
-	 * NOTE: an A-stepping erratum states that if one overflow bit is
-	 *       already set and another overflow occurs, the previously
-	 *       set bit is cleared. There is no workaround; this is
-	 *       fixed in the B stepping and later.
- */
- pmnc = xscale1pmu_read_pmnc();
-
- /*
- * Write the value back to clear the overflow flags. Overflow
- * flags remain in pmnc for use below. We also disable the PMU
- * while we process the interrupt.
- */
- xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
-
- if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
- return IRQ_NONE;
-
- regs = get_irq_regs();
-
- for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
- struct perf_event *event = cpuc->events[idx];
- struct hw_perf_event *hwc;
-
- if (!event)
- continue;
-
- if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
- continue;
-
- hwc = &event->hw;
- armpmu_event_update(event);
- perf_sample_data_init(&data, 0, hwc->last_period);
- if (!armpmu_event_set_period(event))
- continue;
-
- if (perf_event_overflow(event, &data, regs))
- cpu_pmu->disable(event);
- }
-
- irq_work_run();
-
- /*
- * Re-enable the PMU.
- */
- pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
- xscale1pmu_write_pmnc(pmnc);
-
- return IRQ_HANDLED;
-}
-
-static void xscale1pmu_enable_event(struct perf_event *event)
-{
- unsigned long val, mask, evt, flags;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
- int idx = hwc->idx;
-
- switch (idx) {
- case XSCALE_CYCLE_COUNTER:
- mask = 0;
- evt = XSCALE1_CCOUNT_INT_EN;
- break;
- case XSCALE_COUNTER0:
- mask = XSCALE1_COUNT0_EVT_MASK;
- evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
- XSCALE1_COUNT0_INT_EN;
- break;
- case XSCALE_COUNTER1:
- mask = XSCALE1_COUNT1_EVT_MASK;
- evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
- XSCALE1_COUNT1_INT_EN;
- break;
- default:
- WARN_ONCE(1, "invalid counter number (%d)\n", idx);
- return;
- }
-
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- val = xscale1pmu_read_pmnc();
- val &= ~mask;
- val |= evt;
- xscale1pmu_write_pmnc(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
-static void xscale1pmu_disable_event(struct perf_event *event)
-{
- unsigned long val, mask, evt, flags;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
- int idx = hwc->idx;
-
- switch (idx) {
- case XSCALE_CYCLE_COUNTER:
- mask = XSCALE1_CCOUNT_INT_EN;
- evt = 0;
- break;
- case XSCALE_COUNTER0:
- mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
- evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
- break;
- case XSCALE_COUNTER1:
- mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
- evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
- break;
- default:
- WARN_ONCE(1, "invalid counter number (%d)\n", idx);
- return;
- }
-
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- val = xscale1pmu_read_pmnc();
- val &= ~mask;
- val |= evt;
- xscale1pmu_write_pmnc(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
-static int
-xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
- struct perf_event *event)
-{
-	struct hw_perf_event *hwc = &event->hw;
-
-	if (hwc->config_base == XSCALE_PERFCTR_CCNT) {
- if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
- return -EAGAIN;
-
- return XSCALE_CYCLE_COUNTER;
- } else {
- if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask))
- return XSCALE_COUNTER1;
-
- if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask))
- return XSCALE_COUNTER0;
-
- return -EAGAIN;
- }
-}
-
-static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
-{
- unsigned long flags, val;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- val = xscale1pmu_read_pmnc();
- val |= XSCALE_PMU_ENABLE;
- xscale1pmu_write_pmnc(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
-static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
-{
- unsigned long flags, val;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- val = xscale1pmu_read_pmnc();
- val &= ~XSCALE_PMU_ENABLE;
- xscale1pmu_write_pmnc(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
-static inline u32 xscale1pmu_read_counter(struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- int counter = hwc->idx;
- u32 val = 0;
-
- switch (counter) {
- case XSCALE_CYCLE_COUNTER:
- asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
- break;
- case XSCALE_COUNTER0:
- asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
- break;
- case XSCALE_COUNTER1:
- asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
- break;
- }
-
- return val;
-}
-
-static inline void xscale1pmu_write_counter(struct perf_event *event, u32 val)
-{
- struct hw_perf_event *hwc = &event->hw;
- int counter = hwc->idx;
-
- switch (counter) {
- case XSCALE_CYCLE_COUNTER:
- asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
- break;
- case XSCALE_COUNTER0:
- asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
- break;
- case XSCALE_COUNTER1:
- asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
- break;
- }
-}
-
-static int xscale_map_event(struct perf_event *event)
-{
- return armpmu_map_event(event, &xscale_perf_map,
- &xscale_perf_cache_map, 0xFF);
-}
-
-static int xscale1pmu_init(struct arm_pmu *cpu_pmu)
-{
- cpu_pmu->name = "armv5_xscale1";
- cpu_pmu->handle_irq = xscale1pmu_handle_irq;
- cpu_pmu->enable = xscale1pmu_enable_event;
- cpu_pmu->disable = xscale1pmu_disable_event;
- cpu_pmu->read_counter = xscale1pmu_read_counter;
- cpu_pmu->write_counter = xscale1pmu_write_counter;
- cpu_pmu->get_event_idx = xscale1pmu_get_event_idx;
- cpu_pmu->start = xscale1pmu_start;
- cpu_pmu->stop = xscale1pmu_stop;
- cpu_pmu->map_event = xscale_map_event;
- cpu_pmu->num_events = 3;
- cpu_pmu->max_period = (1LLU << 32) - 1;
-
- return 0;
-}
-
-#define XSCALE2_OVERFLOWED_MASK 0x01f
-#define XSCALE2_CCOUNT_OVERFLOW 0x001
-#define XSCALE2_COUNT0_OVERFLOW 0x002
-#define XSCALE2_COUNT1_OVERFLOW 0x004
-#define XSCALE2_COUNT2_OVERFLOW 0x008
-#define XSCALE2_COUNT3_OVERFLOW 0x010
-#define XSCALE2_CCOUNT_INT_EN 0x001
-#define XSCALE2_COUNT0_INT_EN 0x002
-#define XSCALE2_COUNT1_INT_EN 0x004
-#define XSCALE2_COUNT2_INT_EN 0x008
-#define XSCALE2_COUNT3_INT_EN 0x010
-#define XSCALE2_COUNT0_EVT_SHFT 0
-#define XSCALE2_COUNT0_EVT_MASK (0xff << XSCALE2_COUNT0_EVT_SHFT)
-#define XSCALE2_COUNT1_EVT_SHFT 8
-#define XSCALE2_COUNT1_EVT_MASK (0xff << XSCALE2_COUNT1_EVT_SHFT)
-#define XSCALE2_COUNT2_EVT_SHFT 16
-#define XSCALE2_COUNT2_EVT_MASK (0xff << XSCALE2_COUNT2_EVT_SHFT)
-#define XSCALE2_COUNT3_EVT_SHFT 24
-#define XSCALE2_COUNT3_EVT_MASK (0xff << XSCALE2_COUNT3_EVT_SHFT)
-
-static inline u32
-xscale2pmu_read_pmnc(void)
-{
- u32 val;
- asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
- /* bits 1-2 and 4-23 are read-unpredictable */
- return val & 0xff000009;
-}
-
-static inline void
-xscale2pmu_write_pmnc(u32 val)
-{
- /* bits 4-23 are write-as-0, 24-31 are write ignored */
- val &= 0xf;
- asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
-}
-
-static inline u32
-xscale2pmu_read_overflow_flags(void)
-{
- u32 val;
- asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
- return val;
-}
-
-static inline void
-xscale2pmu_write_overflow_flags(u32 val)
-{
- asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
-}
-
-static inline u32
-xscale2pmu_read_event_select(void)
-{
- u32 val;
- asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
- return val;
-}
-
-static inline void
-xscale2pmu_write_event_select(u32 val)
-{
- asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val));
-}
-
-static inline u32
-xscale2pmu_read_int_enable(void)
-{
- u32 val;
- asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
- return val;
-}
-
-static void
-xscale2pmu_write_int_enable(u32 val)
-{
- asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
-}
-
-static inline int
-xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
- enum xscale_counters counter)
-{
- int ret = 0;
-
- switch (counter) {
- case XSCALE_CYCLE_COUNTER:
- ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
- break;
- case XSCALE_COUNTER0:
- ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
- break;
- case XSCALE_COUNTER1:
- ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
- break;
- case XSCALE_COUNTER2:
- ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
- break;
- case XSCALE_COUNTER3:
- ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
- break;
- default:
- WARN_ONCE(1, "invalid counter number (%d)\n", counter);
- }
-
- return ret;
-}
-
-static irqreturn_t
-xscale2pmu_handle_irq(int irq_num, void *dev)
-{
- unsigned long pmnc, of_flags;
- struct perf_sample_data data;
- struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
- struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
- struct pt_regs *regs;
- int idx;
-
- /* Disable the PMU. */
- pmnc = xscale2pmu_read_pmnc();
- xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
-
- /* Check the overflow flag register. */
- of_flags = xscale2pmu_read_overflow_flags();
- if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
- return IRQ_NONE;
-
- /* Clear the overflow bits. */
- xscale2pmu_write_overflow_flags(of_flags);
-
- regs = get_irq_regs();
-
- for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
- struct perf_event *event = cpuc->events[idx];
- struct hw_perf_event *hwc;
-
- if (!event)
- continue;
-
- if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
- continue;
-
- hwc = &event->hw;
- armpmu_event_update(event);
- perf_sample_data_init(&data, 0, hwc->last_period);
- if (!armpmu_event_set_period(event))
- continue;
-
- if (perf_event_overflow(event, &data, regs))
- cpu_pmu->disable(event);
- }
-
- irq_work_run();
-
- /*
- * Re-enable the PMU.
- */
- pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
- xscale2pmu_write_pmnc(pmnc);
-
- return IRQ_HANDLED;
-}
-
-static void xscale2pmu_enable_event(struct perf_event *event)
-{
- unsigned long flags, ien, evtsel;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
- int idx = hwc->idx;
-
- ien = xscale2pmu_read_int_enable();
- evtsel = xscale2pmu_read_event_select();
-
- switch (idx) {
- case XSCALE_CYCLE_COUNTER:
- ien |= XSCALE2_CCOUNT_INT_EN;
- break;
- case XSCALE_COUNTER0:
- ien |= XSCALE2_COUNT0_INT_EN;
- evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
- evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
- break;
- case XSCALE_COUNTER1:
- ien |= XSCALE2_COUNT1_INT_EN;
- evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
- evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
- break;
- case XSCALE_COUNTER2:
- ien |= XSCALE2_COUNT2_INT_EN;
- evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
- evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
- break;
- case XSCALE_COUNTER3:
- ien |= XSCALE2_COUNT3_INT_EN;
- evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
- evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
- break;
- default:
- WARN_ONCE(1, "invalid counter number (%d)\n", idx);
- return;
- }
-
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- xscale2pmu_write_event_select(evtsel);
- xscale2pmu_write_int_enable(ien);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
-static void xscale2pmu_disable_event(struct perf_event *event)
-{
- unsigned long flags, ien, evtsel, of_flags;
- struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
- int idx = hwc->idx;
-
- ien = xscale2pmu_read_int_enable();
- evtsel = xscale2pmu_read_event_select();
-
- switch (idx) {
- case XSCALE_CYCLE_COUNTER:
- ien &= ~XSCALE2_CCOUNT_INT_EN;
- of_flags = XSCALE2_CCOUNT_OVERFLOW;
- break;
- case XSCALE_COUNTER0:
- ien &= ~XSCALE2_COUNT0_INT_EN;
- evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
- evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
- of_flags = XSCALE2_COUNT0_OVERFLOW;
- break;
- case XSCALE_COUNTER1:
- ien &= ~XSCALE2_COUNT1_INT_EN;
- evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
- evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
- of_flags = XSCALE2_COUNT1_OVERFLOW;
- break;
- case XSCALE_COUNTER2:
- ien &= ~XSCALE2_COUNT2_INT_EN;
- evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
- evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
- of_flags = XSCALE2_COUNT2_OVERFLOW;
- break;
- case XSCALE_COUNTER3:
- ien &= ~XSCALE2_COUNT3_INT_EN;
- evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
- evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
- of_flags = XSCALE2_COUNT3_OVERFLOW;
- break;
- default:
- WARN_ONCE(1, "invalid counter number (%d)\n", idx);
- return;
- }
-
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- xscale2pmu_write_event_select(evtsel);
- xscale2pmu_write_int_enable(ien);
- xscale2pmu_write_overflow_flags(of_flags);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
-static int
-xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
- struct perf_event *event)
-{
- int idx = xscale1pmu_get_event_idx(cpuc, event);
- if (idx >= 0)
- goto out;
-
- if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
- idx = XSCALE_COUNTER3;
- else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
- idx = XSCALE_COUNTER2;
-out:
- return idx;
-}
-
-static void xscale2pmu_start(struct arm_pmu *cpu_pmu)
-{
- unsigned long flags, val;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
- val |= XSCALE_PMU_ENABLE;
- xscale2pmu_write_pmnc(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
-static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
-{
- unsigned long flags, val;
- struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
-
- raw_spin_lock_irqsave(&events->pmu_lock, flags);
- val = xscale2pmu_read_pmnc();
- val &= ~XSCALE_PMU_ENABLE;
- xscale2pmu_write_pmnc(val);
- raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
-}
-
-static inline u32 xscale2pmu_read_counter(struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- int counter = hwc->idx;
- u32 val = 0;
-
- switch (counter) {
- case XSCALE_CYCLE_COUNTER:
- asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
- break;
- case XSCALE_COUNTER0:
- asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
- break;
- case XSCALE_COUNTER1:
- asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
- break;
- case XSCALE_COUNTER2:
- asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
- break;
- case XSCALE_COUNTER3:
- asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
- break;
- }
-
- return val;
-}
-
-static inline void xscale2pmu_write_counter(struct perf_event *event, u32 val)
-{
- struct hw_perf_event *hwc = &event->hw;
- int counter = hwc->idx;
-
- switch (counter) {
- case XSCALE_CYCLE_COUNTER:
- asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
- break;
- case XSCALE_COUNTER0:
- asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
- break;
- case XSCALE_COUNTER1:
- asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
- break;
- case XSCALE_COUNTER2:
- asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
- break;
- case XSCALE_COUNTER3:
- asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
- break;
- }
-}
-
-static int xscale2pmu_init(struct arm_pmu *cpu_pmu)
-{
- cpu_pmu->name = "armv5_xscale2";
- cpu_pmu->handle_irq = xscale2pmu_handle_irq;
- cpu_pmu->enable = xscale2pmu_enable_event;
- cpu_pmu->disable = xscale2pmu_disable_event;
- cpu_pmu->read_counter = xscale2pmu_read_counter;
- cpu_pmu->write_counter = xscale2pmu_write_counter;
- cpu_pmu->get_event_idx = xscale2pmu_get_event_idx;
- cpu_pmu->start = xscale2pmu_start;
- cpu_pmu->stop = xscale2pmu_stop;
- cpu_pmu->map_event = xscale_map_event;
- cpu_pmu->num_events = 5;
- cpu_pmu->max_period = (1LLU << 32) - 1;
-
- return 0;
-}
-
-static const struct pmu_probe_info xscale_pmu_probe_table[] = {
- XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V1, xscale1pmu_init),
- XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V2, xscale2pmu_init),
- { /* sentinel value */ }
-};
-
-static int xscale_pmu_device_probe(struct platform_device *pdev)
-{
- return arm_pmu_device_probe(pdev, NULL, xscale_pmu_probe_table);
-}
-
-static struct platform_driver xscale_pmu_driver = {
- .driver = {
- .name = "xscale-pmu",
- },
- .probe = xscale_pmu_device_probe,
-};
-
-builtin_platform_driver(xscale_pmu_driver);
-#endif /* CONFIG_CPU_XSCALE */
diff --git a/arch/arm/kernel/perf_regs.c b/arch/arm/kernel/perf_regs.c
index c366b83bf955..0529f90395c9 100644
--- a/arch/arm/kernel/perf_regs.c
+++ b/arch/arm/kernel/perf_regs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/kernel.h>
@@ -31,8 +32,7 @@ u64 perf_reg_abi(struct task_struct *task)
}
void perf_get_regs_user(struct perf_regs *regs_user,
- struct pt_regs *regs,
- struct pt_regs *regs_user_copy)
+ struct pt_regs *regs)
{
regs_user->regs = task_pt_regs(current);
regs_user->abi = perf_reg_abi(current);
diff --git a/arch/arm/kernel/phys2virt.S b/arch/arm/kernel/phys2virt.S
new file mode 100644
index 000000000000..fb53db78fe78
--- /dev/null
+++ b/arch/arm/kernel/phys2virt.S
@@ -0,0 +1,238 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 1994-2002 Russell King
+ * Copyright (c) 2003, 2020 ARM Limited
+ * All Rights Reserved
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/page.h>
+
+#ifdef __ARMEB__
+#define LOW_OFFSET 0x4
+#define HIGH_OFFSET 0x0
+#else
+#define LOW_OFFSET 0x0
+#define HIGH_OFFSET 0x4
+#endif
+
+/*
+ * __fixup_pv_table - patch the stub instructions with the delta between
+ * PHYS_OFFSET and PAGE_OFFSET, which is assumed to be
+ * 2 MiB aligned.
+ *
+ * Called from head.S, which expects the following registers to be preserved:
+ * r1 = machine no, r2 = atags or dtb,
+ * r8 = phys_offset, r9 = cpuid, r10 = procinfo
+ */
+ __HEAD
+ENTRY(__fixup_pv_table)
+ mov r0, r8, lsr #PAGE_SHIFT @ convert to PFN
+ str_l r0, __pv_phys_pfn_offset, r3
+
+ adr_l r0, __pv_offset
+ subs r3, r8, #PAGE_OFFSET @ PHYS_OFFSET - PAGE_OFFSET
+ mvn ip, #0
+ strcc ip, [r0, #HIGH_OFFSET] @ save to __pv_offset high bits
+ str r3, [r0, #LOW_OFFSET] @ save to __pv_offset low bits
+
+ mov r0, r3, lsr #21 @ constant for add/sub instructions
+ teq r3, r0, lsl #21 @ must be 2 MiB aligned
+ bne 0f
+
+ adr_l r4, __pv_table_begin
+ adr_l r5, __pv_table_end
+ b __fixup_a_pv_table
+
+0: mov r0, r0 @ deadloop on error
+ b 0b
+ENDPROC(__fixup_pv_table)
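+
+@ Worked example (editorial, not part of the original source): with
+@ PHYS_OFFSET = 0x80000000 and PAGE_OFFSET = 0xc0000000, the subs above
+@ yields r3 = 0xc0000000 (-0x40000000) with a borrow, so ~0 is stored
+@ to the high word of __pv_offset (sign extension) and the patch
+@ constant becomes r0 = r3 >> 21 = 0x600.  Since 0x600 << 21 equals r3
+@ again, the 2 MiB alignment check passes.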
+
+ .text
+__fixup_a_pv_table:
+ adr_l r6, __pv_offset
+ ldr r0, [r6, #HIGH_OFFSET] @ pv_offset high word
+ ldr r6, [r6, #LOW_OFFSET] @ pv_offset low word
+ cmn r0, #1
+#ifdef CONFIG_THUMB2_KERNEL
+ @
+ @ The Thumb-2 versions of the patchable sequences are
+ @
+ @ phys-to-virt: movw <reg>, #offset<31:21>
+ @ lsl <reg>, #21
+ @ sub <VA>, <PA>, <reg>
+ @
+ @ virt-to-phys (non-LPAE): movw <reg>, #offset<31:21>
+ @ lsl <reg>, #21
+ @ add <PA>, <VA>, <reg>
+ @
+ @ virt-to-phys (LPAE): movw <reg>, #offset<31:21>
+ @ lsl <reg>, #21
+ @ adds <PAlo>, <VA>, <reg>
+ @ mov <PAhi>, #offset<39:32>
+ @ adc <PAhi>, <PAhi>, #0
+ @
+ @ In the non-LPAE case, all patchable instructions are MOVW
+ @ instructions, where we need to patch in the offset into the
+ @ second halfword of the opcode (the 16-bit immediate is encoded
+ @ as imm4:i:imm3:imm8)
+ @
+ @ 15 11 10 9 4 3 0 15 14 12 11 8 7 0
+ @ +-----------+---+-------------+------++---+------+----+------+
+ @ MOVW | 1 1 1 1 0 | i | 1 0 0 1 0 0 | imm4 || 0 | imm3 | Rd | imm8 |
+ @ +-----------+---+-------------+------++---+------+----+------+
+ @
+ @ In the LPAE case, we also need to patch in the high word of the
+ @ offset into the immediate field of the MOV instruction, or patch it
+ @ to a MVN instruction if the offset is negative. In this case, we
+ @ need to inspect the first halfword of the opcode, to check whether
+ @ it is MOVW or MOV/MVN, and to perform the MOV to MVN patching if
+ @ needed. The encoding of the immediate is rather complex for values
+ @ of i:imm3 != 0b0000, but fortunately, we never need more than 8 lower
+ @ order bits, which can be patched into imm8 directly (and i:imm3
+ @ cleared)
+ @
+ @ 15 11 10 9 5 0 15 14 12 11 8 7 0
+ @ +-----------+---+---------------------++---+------+----+------+
+ @ MOV | 1 1 1 1 0 | i | 0 0 0 1 0 0 1 1 1 1 || 0 | imm3 | Rd | imm8 |
+ @ MVN | 1 1 1 1 0 | i | 0 0 0 1 1 0 1 1 1 1 || 0 | imm3 | Rd | imm8 |
+ @ +-----------+---+---------------------++---+------+----+------+
+ @
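+	@ Worked example (editorial): for a __pv_offset low word of
+	@ 0xc0000000 (high word ~0), the sequence below leaves
+	@ r3 = 0b110 (offset bits 31:29) and r6 = 0x6000, i.e. imm3 = 6
+	@ and imm8 = 0 in the second MOVW halfword -- together encoding
+	@ the #0x600 immediate (offset >> 21) that the stubs need.
+	@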
+ moveq r0, #0x200000 @ set bit 21, mov to mvn instruction
+ lsrs r3, r6, #29 @ isolate top 3 bits of displacement
+ ubfx r6, r6, #21, #8 @ put bits 28:21 into the MOVW imm8 field
+ bfi r6, r3, #12, #3 @ put bits 31:29 into the MOVW imm3 field
+ b .Lnext
+.Lloop: add r7, r4
+ adds r4, #4 @ clears Z flag
+#ifdef CONFIG_ARM_LPAE
+ ldrh ip, [r7]
+ARM_BE8(rev16 ip, ip)
+ tst ip, #0x200 @ MOVW has bit 9 set, MVN has it clear
+ bne 0f @ skip to MOVW handling (Z flag is clear)
+ bic ip, #0x20 @ clear bit 5 (MVN -> MOV)
+ orr ip, ip, r0, lsr #16 @ MOV -> MVN if offset < 0
+ARM_BE8(rev16 ip, ip)
+ strh ip, [r7]
+ @ Z flag is set
+0:
+#endif
+ ldrh ip, [r7, #2]
+ARM_BE8(rev16 ip, ip)
+ and ip, #0xf00 @ clear everything except Rd field
+ orreq ip, r0 @ Z flag set -> MOV/MVN -> patch in high bits
+ orrne ip, r6 @ Z flag clear -> MOVW -> patch in low bits
+ARM_BE8(rev16 ip, ip)
+ strh ip, [r7, #2]
+#else
+#ifdef CONFIG_CPU_ENDIAN_BE8
+@ in BE8, we load data in BE, but instructions are still in LE
+#define PV_BIT24 0x00000001
+#define PV_IMM8_MASK 0xff000000
+#define PV_IMMR_MSB 0x00080000
+#else
+#define PV_BIT24 0x01000000
+#define PV_IMM8_MASK 0x000000ff
+#define PV_IMMR_MSB 0x00000800
+#endif
+
+ @
+ @ The ARM versions of the patchable sequences are
+ @
+ @ phys-to-virt: sub <VA>, <PA>, #offset<31:24>, lsl #24
+ @ sub <VA>, <PA>, #offset<23:16>, lsl #16
+ @
+ @ virt-to-phys (non-LPAE): add <PA>, <VA>, #offset<31:24>, lsl #24
+ @ add <PA>, <VA>, #offset<23:16>, lsl #16
+ @
+ @ virt-to-phys (LPAE): movw <reg>, #offset<31:20>
+ @ adds <PAlo>, <VA>, <reg>, lsl #20
+ @ mov <PAhi>, #offset<39:32>
+ @ adc <PAhi>, <PAhi>, #0
+ @
+ @ In the non-LPAE case, all patchable instructions are ADD or SUB
+ @ instructions, where we need to patch in the offset into the
+ @ immediate field of the opcode, which is emitted with the correct
+ @ rotation value. (The effective value of the immediate is imm12<7:0>
+ @ rotated right by [2 * imm12<11:8>] bits)
+ @
+ @ 31 28 27 23 22 20 19 16 15 12 11 0
+ @ +------+-----------------+------+------+-------+
+ @ ADD | cond | 0 0 1 0 1 0 0 0 | Rn | Rd | imm12 |
+ @ SUB | cond | 0 0 1 0 0 1 0 0 | Rn | Rd | imm12 |
+ @ MOV | cond | 0 0 1 1 1 0 1 0 | Rn | Rd | imm12 |
+ @ MVN | cond | 0 0 1 1 1 1 1 0 | Rn | Rd | imm12 |
+ @ +------+-----------------+------+------+-------+
+ @
+ @ In the LPAE case, we use a MOVW instruction to carry the low offset
+ @ word, and patch in the high word of the offset into the immediate
+ @ field of the subsequent MOV instruction, or patch it to a MVN
+ @ instruction if the offset is negative. We can distinguish MOVW
+ @ instructions based on bits 23:22 of the opcode, and ADD/SUB can be
+ @ distinguished from MOV/MVN (all using the encodings above) using
+ @ bit 24.
+ @
+ @ 31 28 27 23 22 20 19 16 15 12 11 0
+ @ +------+-----------------+------+------+-------+
+ @ MOVW | cond | 0 0 1 1 0 0 0 0 | imm4 | Rd | imm12 |
+ @ +------+-----------------+------+------+-------+
+ @
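+	@ Worked example (editorial): for a __pv_offset low word of
+	@ 0xc0000000 (high word ~0), moveq below arms the MOV -> MVN
+	@ patch, r6 = 0xc0 carries offset bits 31:24 and r3 = 0x00
+	@ carries bits 23:20, so a 'sub <VA>, <PA>, #offset<31:24>,
+	@ lsl #24' stub gets 0xc0 patched into its imm8 field.
+	@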
+ moveq r0, #0x400000 @ set bit 22, mov to mvn instruction
+ mov r3, r6, lsr #16 @ put offset bits 31-16 into r3
+ mov r6, r6, lsr #24 @ put offset bits 31-24 into r6
+ and r3, r3, #0xf0 @ only keep offset bits 23-20 in r3
+ b .Lnext
+.Lloop: ldr ip, [r7, r4]
+#ifdef CONFIG_ARM_LPAE
+ tst ip, #PV_BIT24 @ ADD/SUB have bit 24 clear
+ beq 1f
+ARM_BE8(rev ip, ip)
+ tst ip, #0xc00000 @ MOVW has bits 23:22 clear
+ bic ip, ip, #0x400000 @ clear bit 22
+ bfc ip, #0, #12 @ clear imm12 field of MOV[W] instruction
+ orreq ip, ip, r6, lsl #4 @ MOVW -> mask in offset bits 31-24
+ orreq ip, ip, r3, lsr #4 @ MOVW -> mask in offset bits 23-20
+ orrne ip, ip, r0 @ MOV -> mask in offset bits 7-0 (or bit 22)
+ARM_BE8(rev ip, ip)
+ b 2f
+1:
+#endif
+ tst ip, #PV_IMMR_MSB @ rotation value >= 16 ?
+ bic ip, ip, #PV_IMM8_MASK
+ orreq ip, ip, r6 ARM_BE8(, lsl #24) @ mask in offset bits 31-24
+ orrne ip, ip, r3 ARM_BE8(, lsl #24) @ mask in offset bits 23-20
+2:
+ str ip, [r7, r4]
+ add r4, r4, #4
+#endif
+
+.Lnext:
+ cmp r4, r5
+ ldrcc r7, [r4] @ use branch for delay slot
+ bcc .Lloop
+ ret lr
+ENDPROC(__fixup_a_pv_table)
+
+ENTRY(fixup_pv_table)
+ stmfd sp!, {r4 - r7, lr}
+ mov r4, r0 @ r0 = table start
+ add r5, r0, r1 @ r1 = table size
+ bl __fixup_a_pv_table
+ ldmfd sp!, {r4 - r7, pc}
+ENDPROC(fixup_pv_table)
+
+ .data
+ .align 2
+ .globl __pv_phys_pfn_offset
+ .type __pv_phys_pfn_offset, %object
+__pv_phys_pfn_offset:
+ .word 0
+ .size __pv_phys_pfn_offset, . -__pv_phys_pfn_offset
+
+ .globl __pv_offset
+ .type __pv_offset, %object
+__pv_offset:
+ .quad 0
+ .size __pv_offset, . -__pv_offset
diff --git a/arch/arm/kernel/pj4-cp0.c b/arch/arm/kernel/pj4-cp0.c
deleted file mode 100644
index 7c9248b74d3f..000000000000
--- a/arch/arm/kernel/pj4-cp0.c
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * linux/arch/arm/kernel/pj4-cp0.c
- *
- * PJ4 iWMMXt coprocessor context switching and handling
- *
- * Copyright (c) 2010 Marvell International Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <asm/thread_notify.h>
-#include <asm/cputype.h>
-
-static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
-{
- struct thread_info *thread = t;
-
- switch (cmd) {
- case THREAD_NOTIFY_FLUSH:
- /*
- * flush_thread() zeroes thread->fpstate, so no need
- * to do anything here.
- *
- * FALLTHROUGH: Ensure we don't try to overwrite our newly
- * initialised state information on the first fault.
- */
-
- case THREAD_NOTIFY_EXIT:
- iwmmxt_task_release(thread);
- break;
-
- case THREAD_NOTIFY_SWITCH:
- iwmmxt_task_switch(thread);
- break;
- }
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block __maybe_unused iwmmxt_notifier_block = {
- .notifier_call = iwmmxt_do,
-};
-
-
-static u32 __init pj4_cp_access_read(void)
-{
- u32 value;
-
- __asm__ __volatile__ (
- "mrc p15, 0, %0, c1, c0, 2\n\t"
- : "=r" (value));
- return value;
-}
-
-static void __init pj4_cp_access_write(u32 value)
-{
- u32 temp;
-
- __asm__ __volatile__ (
- "mcr p15, 0, %1, c1, c0, 2\n\t"
-#ifdef CONFIG_THUMB2_KERNEL
- "isb\n\t"
-#else
- "mrc p15, 0, %0, c1, c0, 2\n\t"
- "mov %0, %0\n\t"
- "sub pc, pc, #4\n\t"
-#endif
- : "=r" (temp) : "r" (value));
-}
-
-static int __init pj4_get_iwmmxt_version(void)
-{
- u32 cp_access, wcid;
-
- cp_access = pj4_cp_access_read();
- pj4_cp_access_write(cp_access | 0xf);
-
- /* check if coprocessor 0 and 1 are available */
- if ((pj4_cp_access_read() & 0xf) != 0xf) {
- pj4_cp_access_write(cp_access);
- return -ENODEV;
- }
-
- /* read iWMMXt coprocessor id register p1, c0 */
- __asm__ __volatile__ ("mrc p1, 0, %0, c0, c0, 0\n" : "=r" (wcid));
-
- pj4_cp_access_write(cp_access);
-
- /* iWMMXt v1 */
- if ((wcid & 0xffffff00) == 0x56051000)
- return 1;
- /* iWMMXt v2 */
- if ((wcid & 0xffffff00) == 0x56052000)
- return 2;
-
- return -EINVAL;
-}
-
-/*
- * Disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy
- * switch code handle iWMMXt context switching.
- */
-static int __init pj4_cp0_init(void)
-{
- u32 __maybe_unused cp_access;
- int vers;
-
- if (!cpu_is_pj4())
- return 0;
-
- vers = pj4_get_iwmmxt_version();
- if (vers < 0)
- return 0;
-
-#ifndef CONFIG_IWMMXT
- pr_info("PJ4 iWMMXt coprocessor detected, but kernel support is missing.\n");
-#else
- cp_access = pj4_cp_access_read() & ~0xf;
- pj4_cp_access_write(cp_access);
-
- pr_info("PJ4 iWMMXt v%d coprocessor enabled.\n", vers);
- elf_hwcap |= HWCAP_IWMMXT;
- thread_register_notifier(&iwmmxt_notifier_block);
-#endif
-
- return 0;
-}
-
-late_initcall(pj4_cp0_init);
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index d96714e1858c..d7aa95225c70 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/kernel/process.c
*
* Copyright (C) 1996-2000 Russell King - Converted to ARM.
* Original Copyright (C) 1995 Linus Torvalds
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
-#include <stdarg.h>
-
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
@@ -21,7 +16,6 @@
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/interrupt.h>
-#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
@@ -40,12 +34,23 @@
#include <asm/tls.h>
#include <asm/vdso.h>
-#ifdef CONFIG_CC_STACKPROTECTOR
+#include "signal.h"
+
+#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
+DEFINE_PER_CPU(struct task_struct *, __entry_task);
+#endif
+
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
+#ifndef CONFIG_CURRENT_POINTER_IN_TPIDRURO
+asmlinkage struct task_struct *__current;
+EXPORT_SYMBOL(__current);
+#endif
+
static const char *processor_modes[] __maybe_unused = {
"USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
"UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
@@ -73,7 +78,6 @@ void arch_cpu_idle(void)
arm_pm_idle();
else
cpu_do_idle();
- local_irq_enable();
}
void arch_cpu_idle_prepare(void)
@@ -94,12 +98,23 @@ void arch_cpu_idle_exit(void)
ledtrig_cpu(CPU_LED_IDLE_END);
}
+void __show_regs_alloc_free(struct pt_regs *regs)
+{
+ int i;
+
+ /* check for r0 - r12 only */
+ for (i = 0; i < 13; i++) {
+ pr_alert("Register r%d information:", i);
+ mem_dump_obj((void *)regs->uregs[i]);
+ }
+}
+
void __show_regs(struct pt_regs *regs)
{
unsigned long flags;
char buf[64];
#ifndef CONFIG_CPU_V7M
- unsigned int domain, fs;
+ unsigned int domain;
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
* Get the domain register for the parent context. In user
@@ -108,21 +123,18 @@ void __show_regs(struct pt_regs *regs)
*/
if (user_mode(regs)) {
domain = DACR_UACCESS_ENABLE;
- fs = get_fs();
} else {
domain = to_svc_pt_regs(regs)->dacr;
- fs = to_svc_pt_regs(regs)->addr_limit;
}
#else
domain = get_domain();
- fs = get_fs();
#endif
#endif
show_regs_print_info(KERN_DEFAULT);
- print_symbol("PC is at %s\n", instruction_pointer(regs));
- print_symbol("LR is at %s\n", regs->ARM_lr);
+ printk("PC is at %pS\n", (void *)instruction_pointer(regs));
+ printk("LR is at %pS\n", (void *)regs->ARM_lr);
printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n",
regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr);
printk("sp : %08lx ip : %08lx fp : %08lx\n",
@@ -151,8 +163,6 @@ void __show_regs(struct pt_regs *regs)
if ((domain & domain_mask(DOMAIN_USER)) ==
domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
segment = "none";
- else if (fs == get_ds())
- segment = "kernel";
else
segment = "user";
@@ -190,7 +200,7 @@ void __show_regs(struct pt_regs *regs)
void show_regs(struct pt_regs * regs)
{
__show_regs(regs);
- dump_stack();
+ dump_backtrace(regs, NULL, KERN_DEFAULT);
}
ATOMIC_NOTIFIER_HEAD(thread_notify_head);
@@ -212,7 +222,6 @@ void flush_thread(void)
flush_ptrace_hw_breakpoint(tsk);
- memset(thread->used_cp, 0, sizeof(thread->used_cp));
memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
memset(&thread->fpstate, 0, sizeof(union fp_state));
@@ -221,16 +230,13 @@ void flush_thread(void)
thread_notify(THREAD_NOTIFY_FLUSH, thread);
}
-void release_thread(struct task_struct *dead_task)
-{
-}
-
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
-int
-copy_thread(unsigned long clone_flags, unsigned long stack_start,
- unsigned long stk_sz, struct task_struct *p)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ u64 clone_flags = args->flags;
+ unsigned long stack_start = args->stack;
+ unsigned long tls = args->tls;
struct thread_info *thread = task_thread_info(p);
struct pt_regs *childregs = task_pt_regs(p);
@@ -246,15 +252,15 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
thread->cpu_domain = get_domain();
#endif
- if (likely(!(p->flags & PF_KTHREAD))) {
+ if (likely(!args->fn)) {
*childregs = *current_pt_regs();
childregs->ARM_r0 = 0;
if (stack_start)
childregs->ARM_sp = stack_start;
} else {
memset(childregs, 0, sizeof(struct pt_regs));
- thread->cpu_context.r4 = stk_sz;
- thread->cpu_context.r5 = stack_start;
+ thread->cpu_context.r4 = (unsigned long)args->fn_arg;
+ thread->cpu_context.r5 = (unsigned long)args->fn;
childregs->ARM_cpsr = SVC_MODE;
}
thread->cpu_context.pc = (unsigned long)ret_from_fork;
@@ -263,7 +269,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
clear_ptrace_hw_breakpoint(p);
if (clone_flags & CLONE_SETTLS)
- thread->tp_value[0] = childregs->ARM_r3;
+ thread->tp_value[0] = tls;
thread->tp_value[1] = get_tpuser();
thread_notify(THREAD_NOTIFY_COPY, thread);
@@ -271,37 +277,11 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
return 0;
}
-/*
- * Fill in the task's elfregs structure for a core dump.
- */
-int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
-{
- elf_core_copy_regs(elfregs, task_pt_regs(t));
- return 1;
-}
-
-/*
- * fill in the fpe structure for a core dump...
- */
-int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
-{
- struct thread_info *thread = current_thread_info();
- int used_math = thread->used_cp[1] | thread->used_cp[2];
-
- if (used_math)
- memcpy(fp, &thread->fpstate.soft, sizeof (*fp));
-
- return used_math != 0;
-}
-EXPORT_SYMBOL(dump_fpu);
-
-unsigned long get_wchan(struct task_struct *p)
+unsigned long __get_wchan(struct task_struct *p)
{
struct stackframe frame;
unsigned long stack_page;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
- return 0;
frame.fp = thread_saved_fp(p);
frame.sp = thread_saved_sp(p);
@@ -319,11 +299,6 @@ unsigned long get_wchan(struct task_struct *p)
return 0;
}
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
- return randomize_page(mm->brk, 0x02000000);
-}
-
#ifdef CONFIG_MMU
#ifdef CONFIG_KUSER_HELPERS
/*
@@ -331,15 +306,15 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
* atomic helpers. Insert it into the gate_vma so that it is visible
* through ptrace and /proc/<pid>/mem.
*/
-static struct vm_area_struct gate_vma = {
- .vm_start = 0xffff0000,
- .vm_end = 0xffff0000 + PAGE_SIZE,
- .vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
-};
+static struct vm_area_struct gate_vma;
static int __init gate_vma_init(void)
{
+ vma_init(&gate_vma, NULL);
gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
+ gate_vma.vm_start = 0xffff0000;
+ gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
+ vm_flags_init(&gate_vma, VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC);
return 0;
}
arch_initcall(gate_vma_init);
@@ -394,7 +369,7 @@ static unsigned long sigpage_addr(const struct mm_struct *mm,
slots = ((last - first) >> PAGE_SHIFT) + 1;
- offset = get_random_int() % slots;
+ offset = get_random_u32_below(slots);
addr = first + (offset << PAGE_SHIFT);
@@ -434,7 +409,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
npages = 1; /* for sigpage */
npages += vdso_total_pages;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
hint = sigpage_addr(mm, npages);
addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
@@ -461,7 +436,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
arm_install_vdso(mm, addr + PAGE_SIZE);
up_fail:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
#endif
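
A quick way to confirm that the page mapped by gate_vma is live is to read the kuser helper version word at its documented fixed address; a minimal sketch, assuming CONFIG_KUSER_HELPERS is enabled (it faults otherwise):

#include <stdio.h>

int main(void)
{
	/* __kuser_helper_version, see Documentation/arm/kernel_user_helpers.rst */
	int version = *(volatile int *)0xffff0ffc;

	printf("kuser helper version: %d\n", version);
	return 0;
}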
diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c
index cb3fcaeb2233..3bb0c4dcfc5c 100644
--- a/arch/arm/kernel/psci_smp.c
+++ b/arch/arm/kernel/psci_smp.c
@@ -1,12 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*
* Copyright (C) 2012 ARM Limited
*
@@ -52,13 +45,20 @@ extern void secondary_startup(void);
static int psci_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
if (psci_ops.cpu_on)
+#ifdef CONFIG_XIP_KERNEL
+ return psci_ops.cpu_on(cpu_logical_map(cpu),
+ ((phys_addr_t)(&secondary_startup)
+ - XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
+ + CONFIG_XIP_PHYS_ADDR));
+#else
return psci_ops.cpu_on(cpu_logical_map(cpu),
virt_to_idmap(&secondary_startup));
+#endif
return -ENODEV;
}
#ifdef CONFIG_HOTPLUG_CPU
-int psci_cpu_disable(unsigned int cpu)
+static int psci_cpu_disable(unsigned int cpu)
{
/* Fail early if we don't have CPU_OFF support */
if (!psci_ops.cpu_off)
@@ -71,7 +71,7 @@ int psci_cpu_disable(unsigned int cpu)
return 0;
}
-void psci_cpu_die(unsigned int cpu)
+static void psci_cpu_die(unsigned int cpu)
{
u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
PSCI_0_2_POWER_STATE_TYPE_SHIFT;
@@ -83,7 +83,7 @@ void psci_cpu_die(unsigned int cpu)
panic("psci: cpu %d failed to shutdown\n", cpu);
}
-int psci_cpu_kill(unsigned int cpu)
+static int psci_cpu_kill(unsigned int cpu)
{
int err, i;
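
The XIP branch in psci_boot_secondary() is a plain linear translation between the flash link address and its physical location; a sketch with made-up bases:

#include <stdint.h>
#include <stdio.h>

/* Example values only: an XIP kernel linked at 0xfc000000 but stored
 * in flash at physical 0x08000000. */
#define EXAMPLE_XIP_VIRT	0xfc000000u
#define EXAMPLE_XIP_PHYS	0x08000000u

static uint32_t xip_virt_to_phys(uint32_t va)
{
	return va - EXAMPLE_XIP_VIRT + EXAMPLE_XIP_PHYS;
}

int main(void)
{
	uint32_t secondary_startup = 0xfc0102c0u;	/* hypothetical symbol address */

	printf("PSCI entry point: %#x\n", xip_virt_to_phys(secondary_startup));
	return 0;
}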
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 58e3771e4c5b..7951b2c06fec 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/kernel/ptrace.c
*
* By Ross Biro 1/23/92
* edited by Linus Torvalds
* ARM modifications Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/sched/signal.h>
@@ -25,10 +22,9 @@
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/audit.h>
-#include <linux/tracehook.h>
#include <linux/unistd.h>
-#include <asm/pgtable.h>
+#include <asm/syscall.h>
#include <asm/traps.h>
#define CREATE_TRACE_POINTS
@@ -201,21 +197,15 @@ void ptrace_disable(struct task_struct *child)
/*
* Handle hitting a breakpoint.
*/
-void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
+void ptrace_break(struct pt_regs *regs)
{
- siginfo_t info;
-
- info.si_signo = SIGTRAP;
- info.si_errno = 0;
- info.si_code = TRAP_BRKPT;
- info.si_addr = (void __user *)instruction_pointer(regs);
-
- force_sig_info(SIGTRAP, &info, tsk);
+ force_sig_fault(SIGTRAP, TRAP_BRKPT,
+ (void __user *)instruction_pointer(regs));
}
static int break_trap(struct pt_regs *regs, unsigned int instr)
{
- ptrace_break(current, regs);
+ ptrace_break(regs);
return 0;
}
@@ -228,8 +218,8 @@ static struct undef_hook arm_break_hook = {
};
static struct undef_hook thumb_break_hook = {
- .instr_mask = 0xffff,
- .instr_val = 0xde01,
+ .instr_mask = 0xffffffff,
+ .instr_val = 0x0000de01,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = PSR_T_BIT,
.fn = break_trap,
@@ -328,32 +318,6 @@ static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
#endif
-#ifdef CONFIG_CRUNCH
-/*
- * Get the child Crunch state.
- */
-static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp)
-{
- struct thread_info *thread = task_thread_info(tsk);
-
- crunch_task_disable(thread); /* force it to ram */
- return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE)
- ? -EFAULT : 0;
-}
-
-/*
- * Set the child Crunch state.
- */
-static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
-{
- struct thread_info *thread = task_thread_info(tsk);
-
- crunch_task_release(thread); /* force a reload */
- return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE)
- ? -EFAULT : 0;
-}
-#endif
-
#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
* Convert a virtual register number into an index for a thread_info
@@ -390,7 +354,6 @@ static void ptrace_hbptriggered(struct perf_event *bp,
struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
long num;
int i;
- siginfo_t info;
for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i)
if (current->thread.debug.hbp[i] == bp)
@@ -398,12 +361,7 @@ static void ptrace_hbptriggered(struct perf_event *bp,
num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i);
- info.si_signo = SIGTRAP;
- info.si_errno = (int)num;
- info.si_code = TRAP_HWBKPT;
- info.si_addr = (void __user *)(bkpt->trigger);
-
- force_sig_info(SIGTRAP, &info, current);
+ force_sig_ptrace_errno_trap((int)num, (void __user *)(bkpt->trigger));
}
/*
@@ -585,14 +543,9 @@ out:
static int gpr_get(struct task_struct *target,
const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
+ struct membuf to)
{
- struct pt_regs *regs = task_pt_regs(target);
-
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- regs,
- 0, sizeof(*regs));
+ return membuf_write(&to, task_pt_regs(target), sizeof(struct pt_regs));
}
static int gpr_set(struct task_struct *target,
@@ -618,12 +571,10 @@ static int gpr_set(struct task_struct *target,
static int fpa_get(struct task_struct *target,
const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
+ struct membuf to)
{
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &task_thread_info(target)->fpstate,
- 0, sizeof(struct user_fp));
+ return membuf_write(&to, &task_thread_info(target)->fpstate,
+ sizeof(struct user_fp));
}
static int fpa_set(struct task_struct *target,
@@ -633,8 +584,6 @@ static int fpa_set(struct task_struct *target,
{
struct thread_info *thread = task_thread_info(target);
- thread->used_cp[1] = thread->used_cp[2] = 1;
-
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&thread->fpstate,
0, sizeof(struct user_fp));
@@ -658,41 +607,20 @@ static int fpa_set(struct task_struct *target,
* vfp_set() ignores this chunk
*
* 1 word for the FPSCR
- *
- * The bounds-checking logic built into user_regset_copyout and friends
- * means that we can make a simple sequence of calls to map the relevant data
- * to/from the specified slice of the user regset structure.
*/
static int vfp_get(struct task_struct *target,
const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
+ struct membuf to)
{
- int ret;
struct thread_info *thread = task_thread_info(target);
struct vfp_hard_struct const *vfp = &thread->vfpstate.hard;
- const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);
vfp_sync_hwstate(thread);
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &vfp->fpregs,
- user_fpregs_offset,
- user_fpregs_offset + sizeof(vfp->fpregs));
- if (ret)
- return ret;
-
- ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
- user_fpregs_offset + sizeof(vfp->fpregs),
- user_fpscr_offset);
- if (ret)
- return ret;
-
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &vfp->fpscr,
- user_fpscr_offset,
- user_fpscr_offset + sizeof(vfp->fpscr));
+ membuf_write(&to, vfp->fpregs, sizeof(vfp->fpregs));
+ membuf_zero(&to, user_fpscr_offset - sizeof(vfp->fpregs));
+ return membuf_store(&to, vfp->fpscr);
}
/*
@@ -721,11 +649,9 @@ static int vfp_set(struct task_struct *target,
if (ret)
return ret;
- ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
- user_fpregs_offset + sizeof(new_vfp.fpregs),
- user_fpscr_offset);
- if (ret)
- return ret;
+ user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ user_fpregs_offset + sizeof(new_vfp.fpregs),
+ user_fpscr_offset);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&new_vfp.fpscr,
@@ -751,11 +677,11 @@ enum arm_regset {
static const struct user_regset arm_regsets[] = {
[REGSET_GPR] = {
- .core_note_type = NT_PRSTATUS,
+ USER_REGSET_NOTE_TYPE(PRSTATUS),
.n = ELF_NGREG,
.size = sizeof(u32),
.align = sizeof(u32),
- .get = gpr_get,
+ .regset_get = gpr_get,
.set = gpr_set
},
[REGSET_FPR] = {
@@ -763,11 +689,11 @@ static const struct user_regset arm_regsets[] = {
* For the FPA regs in fpstate, the real fields are a mixture
* of sizes, so pretend that the registers are word-sized:
*/
- .core_note_type = NT_PRFPREG,
+ USER_REGSET_NOTE_TYPE(PRFPREG),
.n = sizeof(struct user_fp) / sizeof(u32),
.size = sizeof(u32),
.align = sizeof(u32),
- .get = fpa_get,
+ .regset_get = fpa_get,
.set = fpa_set
},
#ifdef CONFIG_VFP
@@ -776,11 +702,11 @@ static const struct user_regset arm_regsets[] = {
* Pretend that the VFP regs are word-sized, since the FPSCR is
* a single word dangling at the end of struct user_vfp:
*/
- .core_note_type = NT_ARM_VFP,
+ USER_REGSET_NOTE_TYPE(ARM_VFP),
.n = ARM_VFPREGS_SIZE / sizeof(u32),
.size = sizeof(u32),
.align = sizeof(u32),
- .get = vfp_get,
+ .regset_get = vfp_get,
.set = vfp_set
},
#endif /* CONFIG_VFP */
@@ -855,20 +781,12 @@ long arch_ptrace(struct task_struct *child, long request,
break;
case PTRACE_SET_SYSCALL:
- task_thread_info(child)->syscall = data;
+ if (data != -1)
+ data &= __NR_SYSCALL_MASK;
+ task_thread_info(child)->abi_syscall = data;
ret = 0;
break;
-#ifdef CONFIG_CRUNCH
- case PTRACE_GETCRUNCHREGS:
- ret = ptrace_getcrunchregs(child, datap);
- break;
-
- case PTRACE_SETCRUNCHREGS:
- ret = ptrace_setcrunchregs(child, datap);
- break;
-#endif
-
#ifdef CONFIG_VFP
case PTRACE_GETVFPREGS:
ret = copy_regset_to_user(child,
@@ -909,8 +827,7 @@ enum ptrace_syscall_dir {
PTRACE_SYSCALL_EXIT,
};
-static void tracehook_report_syscall(struct pt_regs *regs,
- enum ptrace_syscall_dir dir)
+static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir)
{
unsigned long ip;
@@ -922,31 +839,31 @@ static void tracehook_report_syscall(struct pt_regs *regs,
regs->ARM_ip = dir;
if (dir == PTRACE_SYSCALL_EXIT)
- tracehook_report_syscall_exit(regs, 0);
- else if (tracehook_report_syscall_entry(regs))
- current_thread_info()->syscall = -1;
+ ptrace_report_syscall_exit(regs, 0);
+ else if (ptrace_report_syscall_entry(regs))
+ current_thread_info()->abi_syscall = -1;
regs->ARM_ip = ip;
}
-asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
+asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
- current_thread_info()->syscall = scno;
+ int scno;
if (test_thread_flag(TIF_SYSCALL_TRACE))
- tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+ report_syscall(regs, PTRACE_SYSCALL_ENTER);
/* Do seccomp after ptrace; syscall may have changed. */
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
- if (secure_computing(NULL) == -1)
+ if (secure_computing() == -1)
return -1;
#else
/* XXX: remove this once OABI gets fixed */
- secure_computing_strict(current_thread_info()->syscall);
+ secure_computing_strict(syscall_get_nr(current, regs));
#endif
/* Tracer or seccomp may have changed syscall. */
- scno = current_thread_info()->syscall;
+ scno = syscall_get_nr(current, regs);
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_enter(regs, scno);
@@ -975,5 +892,5 @@ asmlinkage void syscall_trace_exit(struct pt_regs *regs)
trace_sys_exit(regs, regs_return_value(regs));
if (test_thread_flag(TIF_SYSCALL_TRACE))
- tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
+ report_syscall(regs, PTRACE_SYSCALL_EXIT);
}
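
For context, PTRACE_SET_SYSCALL is the ARM-specific request whose argument the hunk above now masks with __NR_SYSCALL_MASK; a tracer drives it roughly like this (minimal sketch, error handling omitted):

#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_SET_SYSCALL
#define PTRACE_SET_SYSCALL	23	/* from arch/arm/include/uapi/asm/ptrace.h */
#endif

/* At a syscall-entry stop: rewrite the pending syscall number.
 * Passing -1 cancels the call, matching the check added above. */
static long set_syscall(pid_t pid, long scno)
{
	return ptrace(PTRACE_SET_SYSCALL, pid, (void *)0, (void *)scno);
}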
diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
index 3b2aa9a9fe26..3f0d5c3dae11 100644
--- a/arch/arm/kernel/reboot.c
+++ b/arch/arm/kernel/reboot.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 1996-2000 Russell King - Converted to ARM.
* Original Copyright (C) 1995 Linus Torvalds
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/cpu.h>
#include <linux/delay.h>
@@ -13,6 +10,7 @@
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/virt.h>
+#include <asm/system_misc.h>
#include "reboot.h"
@@ -21,7 +19,6 @@ typedef void (*phys_reset_t)(unsigned long, bool);
/*
* Function pointers to optional machine specific functions
*/
-void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
@@ -91,11 +88,11 @@ void soft_restart(unsigned long addr)
* to execute e.g. a RAM-based pin loop is not sufficient. This allows the
* kexec'd kernel to use any and all RAM as it sees fit, without having to
* avoid any code or data used by any SW CPU pin loop. The CPU hotplug
- * functionality embodied in disable_nonboot_cpus() to achieve this.
+ * functionality embodied in smp_shutdown_nonboot_cpus() is used to achieve this.
*/
void machine_shutdown(void)
{
- disable_nonboot_cpus();
+ smp_shutdown_nonboot_cpus(reboot_cpu);
}
/*
@@ -120,9 +117,7 @@ void machine_power_off(void)
{
local_irq_disable();
smp_send_stop();
-
- if (pm_power_off)
- pm_power_off();
+ do_kernel_power_off();
}
/*
@@ -141,10 +136,7 @@ void machine_restart(char *cmd)
local_irq_disable();
smp_send_stop();
- if (arm_pm_restart)
- arm_pm_restart(reboot_mode, cmd);
- else
- do_kernel_restart(cmd);
+ do_kernel_restart(cmd);
/* Give a grace period for failure to restart of 1s */
mdelay(1000);
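
With arm_pm_restart gone, board code hooks the same notifier chain that do_kernel_restart() walks; a minimal sketch of such a handler (all myboard_* names are hypothetical):

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static int myboard_restart(struct notifier_block *nb, unsigned long mode,
			   void *cmd)
{
	/* kick the board's reset register or watchdog here */
	return NOTIFY_DONE;
}

static struct notifier_block myboard_restart_nb = {
	.notifier_call	= myboard_restart,
	.priority	= 128,	/* same priority as the arch shim in setup.c */
};

static int __init myboard_restart_init(void)
{
	return register_restart_handler(&myboard_restart_nb);
}
device_initcall(myboard_restart_init);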
diff --git a/arch/arm/kernel/reboot.h b/arch/arm/kernel/reboot.h
index bf7a0b1f076e..189ab81b77b6 100644
--- a/arch/arm/kernel/reboot.h
+++ b/arch/arm/kernel/reboot.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef REBOOT_H
#define REBOOT_H
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index 35e72585ec1d..218d524360fc 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -1,17 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* relocate_kernel.S - put the kernel image in place to boot
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
#include <asm/kexec.h>
.align 3 /* not needed for this code, but keeps fncpy() happy */
ENTRY(relocate_new_kernel)
- ldr r0,kexec_indirection_page
- ldr r1,kexec_start_address
+ adr r7, relocate_new_kernel_end
+ ldr r0, [r7, #KEXEC_INDIR_PAGE]
+ ldr r1, [r7, #KEXEC_START_ADDR]
/*
* If there is no indirection page (we are doing crashdumps)
@@ -24,26 +27,26 @@ ENTRY(relocate_new_kernel)
ldr r3, [r0],#4
/* Is it a destination page. Put destination address to r4 */
- tst r3,#1,0
+ tst r3,#1
beq 1f
bic r4,r3,#1
b 0b
1:
/* Is it an indirection page */
- tst r3,#2,0
+ tst r3,#2
beq 1f
bic r0,r3,#2
b 0b
1:
/* are we done ? */
- tst r3,#4,0
+ tst r3,#4
beq 1f
b 2f
1:
/* is it source ? */
- tst r3,#8,0
+ tst r3,#8
beq 0b
bic r3,r3,#8
mov r6,#1024
@@ -56,34 +59,16 @@ ENTRY(relocate_new_kernel)
2:
/* Jump to relocated kernel */
- mov lr,r1
- mov r0,#0
- ldr r1,kexec_mach_type
- ldr r2,kexec_boot_atags
- ARM( ret lr )
- THUMB( bx lr )
-
- .align
-
- .globl kexec_start_address
-kexec_start_address:
- .long 0x0
-
- .globl kexec_indirection_page
-kexec_indirection_page:
- .long 0x0
-
- .globl kexec_mach_type
-kexec_mach_type:
- .long 0x0
-
- /* phy addr of the atags for the new kernel */
- .globl kexec_boot_atags
-kexec_boot_atags:
- .long 0x0
+ mov lr, r1
+ mov r0, #0
+ ldr r1, [r7, #KEXEC_MACH_TYPE]
+ ldr r2, [r7, #KEXEC_R2]
+ ARM( ret lr )
+ THUMB( bx lr )
ENDPROC(relocate_new_kernel)
+ .align 3
relocate_new_kernel_end:
.globl relocate_new_kernel_size
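
The flag tests in the copy loop above follow the kexec indirection-page format from include/linux/kexec.h; a small decoding sketch:

#include <stdint.h>

#define IND_DESTINATION	(1 << 0)	/* entry holds the next destination page */
#define IND_INDIRECTION	(1 << 1)	/* entry chains to another page of entries */
#define IND_DONE	(1 << 2)	/* end of the list */
#define IND_SOURCE	(1 << 3)	/* entry holds a source page to copy */

static const char *kexec_entry_type(uint32_t entry)
{
	if (entry & IND_DESTINATION)
		return "destination";
	if (entry & IND_INDIRECTION)
		return "indirection";
	if (entry & IND_DONE)
		return "done";
	if (entry & IND_SOURCE)
		return "source";
	return "invalid";
}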
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index 36ed35073289..ac15db66df4c 100644
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -1,17 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/arm/kernel/return_address.c
*
* Copyright (C) 2009 Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
* for Pengutronix
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
*/
#include <linux/export.h>
#include <linux/ftrace.h>
-
-#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
#include <linux/sched.h>
#include <asm/stacktrace.h>
@@ -21,17 +16,17 @@ struct return_address_data {
void *addr;
};
-static int save_return_addr(struct stackframe *frame, void *d)
+static bool save_return_addr(void *d, unsigned long pc)
{
struct return_address_data *data = d;
if (!data->level) {
- data->addr = (void *)frame->pc;
+ data->addr = (void *)pc;
- return 1;
+ return false;
} else {
--data->level;
- return 0;
+ return true;
}
}
@@ -46,7 +41,13 @@ void *return_address(unsigned int level)
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_stack_pointer;
frame.lr = (unsigned long)__builtin_return_address(0);
- frame.pc = (unsigned long)return_address;
+here:
+ frame.pc = (unsigned long)&&here;
+#ifdef CONFIG_KRETPROBES
+ frame.kr_cur = NULL;
+ frame.tsk = current;
+#endif
+ frame.ex_frame = false;
walk_stackframe(&frame, save_return_addr, &data);
@@ -56,6 +57,4 @@ void *return_address(unsigned int level)
return NULL;
}
-#endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */
-
EXPORT_SYMBOL_GPL(return_address);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 8e9a3e40d949..0bfd66c7ada0 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/kernel/setup.c
*
* Copyright (C) 1995-2001 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/efi.h>
#include <linux/export.h>
@@ -16,12 +13,12 @@
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
-#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
-#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
+#include <linux/libfdt.h>
+#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
@@ -62,6 +59,7 @@
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>
+#include <asm/kasan.h>
#include "atags.h"
@@ -78,13 +76,6 @@ static int __init fpe_setup(char *line)
__setup("fpe=", fpe_setup);
#endif
-extern void init_default_cache_policy(unsigned long);
-extern void paging_init(const struct machine_desc *desc);
-extern void early_mm_init(const struct machine_desc *);
-extern void adjust_lowmem_bounds(void);
-extern enum reboot_mode reboot_mode;
-extern void setup_dma_zone(const struct machine_desc *desc);
-
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
@@ -115,6 +106,11 @@ EXPORT_SYMBOL(elf_hwcap2);
#ifdef MULTI_CPU
struct processor processor __ro_after_init;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+struct processor *cpu_vtable[NR_CPUS] = {
+ [0] = &processor,
+};
+#endif
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __ro_after_init;
@@ -138,10 +134,10 @@ EXPORT_SYMBOL(outer_cache);
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
struct stack {
- u32 irq[3];
- u32 abt[3];
- u32 und[3];
- u32 fiq[3];
+ u32 irq[4];
+ u32 abt[4];
+ u32 und[4];
+ u32 fiq[4];
} ____cacheline_aligned;
#ifndef CONFIG_CPU_V7M
@@ -447,6 +443,8 @@ static void __init cpuid_init_hwcaps(void)
{
int block;
u32 isar5;
+ u32 isar6;
+ u32 pfr2;
if (cpu_architecture() < CPU_ARCH_ARMv7)
return;
@@ -482,6 +480,18 @@ static void __init cpuid_init_hwcaps(void)
block = cpuid_feature_extract_field(isar5, 16);
if (block >= 1)
elf_hwcap2 |= HWCAP2_CRC32;
+
+ /* Check for Speculation barrier instruction */
+ isar6 = read_cpuid_ext(CPUID_EXT_ISAR6);
+ block = cpuid_feature_extract_field(isar6, 12);
+ if (block >= 1)
+ elf_hwcap2 |= HWCAP2_SB;
+
+ /* Check for Speculative Store Bypassing control */
+ pfr2 = read_cpuid_ext(CPUID_EXT_PFR2);
+ block = cpuid_feature_extract_field(pfr2, 4);
+ if (block >= 1)
+ elf_hwcap2 |= HWCAP2_SSBS;
}
static void __init elf_hwcap_fixup(void)
@@ -542,9 +552,11 @@ void notrace cpu_init(void)
* In Thumb-2, msr with an immediate value is not allowed.
*/
#ifdef CONFIG_THUMB2_KERNEL
-#define PLC "r"
+#define PLC_l "l"
+#define PLC_r "r"
#else
-#define PLC "I"
+#define PLC_l "I"
+#define PLC_r "I"
#endif
/*
@@ -566,15 +578,15 @@ void notrace cpu_init(void)
"msr cpsr_c, %9"
:
: "r" (stk),
- PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
"I" (offsetof(struct stack, irq[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
"I" (offsetof(struct stack, abt[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
"I" (offsetof(struct stack, und[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
"I" (offsetof(struct stack, fiq[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
+ PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
: "r14");
#endif
}
@@ -667,28 +679,33 @@ static void __init smp_build_mpidr_hash(void)
}
#endif
-static void __init setup_processor(void)
+/*
+ * locate processor in the list of supported processor types. The linker
+ * builds this table for us from the entries in arch/arm/mm/proc-*.S
+ */
+struct proc_info_list *lookup_processor(u32 midr)
{
- struct proc_info_list *list;
+ struct proc_info_list *list = lookup_processor_type(midr);
- /*
- * locate processor in the list of supported processor
- * types. The linker builds this table for us from the
- * entries in arch/arm/mm/proc-*.S
- */
- list = lookup_processor_type(read_cpuid_id());
if (!list) {
- pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
- read_cpuid_id());
- while (1);
+ pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
+ smp_processor_id(), midr);
+ while (1)
+ /* can't use cpu_relax() here as it may require MMU setup */;
}
+ return list;
+}
+
+static void __init setup_processor(void)
+{
+ unsigned int midr = read_cpuid_id();
+ struct proc_info_list *list = lookup_processor(midr);
+
cpu_name = list->cpu_name;
__cpu_architecture = __get_cpu_architecture();
-#ifdef MULTI_CPU
- processor = *list->proc;
-#endif
+ init_proc_vtable(list->proc);
#ifdef MULTI_TLB
cpu_tlb = *list->tlb;
#endif
@@ -700,7 +717,7 @@ static void __init setup_processor(void)
#endif
pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
- cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
+ list->cpu_name, midr, midr & 15,
proc_arch[cpu_architecture()], get_cr());
snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
@@ -754,10 +771,10 @@ int __init arm_add_memory(u64 start, u64 size)
else
size -= aligned_start - start;
-#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
+#ifndef CONFIG_PHYS_ADDR_T_64BIT
if (aligned_start > ULONG_MAX) {
pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
- (long long)start);
+ start);
return -EINVAL;
}
@@ -837,38 +854,44 @@ early_param("mem", early_mem);
static void __init request_standard_resources(const struct machine_desc *mdesc)
{
- struct memblock_region *region;
+ phys_addr_t start, end, res_end;
struct resource *res;
+ u64 i;
kernel_code.start = virt_to_phys(_text);
kernel_code.end = virt_to_phys(__init_begin - 1);
kernel_data.start = virt_to_phys(_sdata);
kernel_data.end = virt_to_phys(_end - 1);
- for_each_memblock(memory, region) {
- phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
- phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
+ for_each_mem_range(i, &start, &end) {
unsigned long boot_alias_start;
/*
+ * In memblock, end points to the first byte after the
+	 * range while in resources, end points to the last byte in
+ * the range.
+ */
+ res_end = end - 1;
+
+ /*
* Some systems have a special memory alias which is only
* used for booting. We need to advertise this region to
* kexec-tools so they know where bootable RAM is located.
*/
boot_alias_start = phys_to_idmap(start);
if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
- res = memblock_virt_alloc(sizeof(*res), 0);
+ res = memblock_alloc_or_panic(sizeof(*res), SMP_CACHE_BYTES);
res->name = "System RAM (boot alias)";
res->start = boot_alias_start;
- res->end = phys_to_idmap(end);
+ res->end = phys_to_idmap(res_end);
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
request_resource(&iomem_resource, res);
}
- res = memblock_virt_alloc(sizeof(*res), 0);
+ res = memblock_alloc_or_panic(sizeof(*res), SMP_CACHE_BYTES);
res->name = "System RAM";
res->start = start;
- res->end = end;
+ res->end = res_end;
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
request_resource(&iomem_resource, res);
@@ -899,9 +922,8 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
request_resource(&ioport_resource, &lp2);
}
-#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
- defined(CONFIG_EFI)
-struct screen_info screen_info = {
+#if defined(CONFIG_VGA_CONSOLE)
+struct screen_info vgacon_screen_info = {
.orig_video_lines = 30,
.orig_video_cols = 80,
.orig_video_mode = 0,
@@ -951,7 +973,7 @@ static int __init init_machine_late(void)
}
late_initcall(init_machine_late);
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_CRASH_RESERVE
/*
* The crash region must be aligned to 128MB to avoid
* zImage relocating below the reserved region.
@@ -981,8 +1003,10 @@ static void __init reserve_crashkernel(void)
total_mem = get_total_mem();
ret = parse_crashkernel(boot_command_line, total_mem,
- &crash_size, &crash_base);
- if (ret)
+ &crash_size, &crash_base,
+ NULL, NULL, NULL);
+ /* invalid value specified or crashkernel=0 */
+ if (ret || !crash_size)
return;
if (crash_base <= 0) {
@@ -990,31 +1014,25 @@ static void __init reserve_crashkernel(void)
unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
if (crash_max > lowmem_max)
crash_max = lowmem_max;
- crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
- crash_size, CRASH_ALIGN);
+
+ crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
+ CRASH_ALIGN, crash_max);
if (!crash_base) {
pr_err("crashkernel reservation failed - No suitable area found.\n");
return;
}
} else {
+ unsigned long long crash_max = crash_base + crash_size;
unsigned long long start;
- start = memblock_find_in_range(crash_base,
- crash_base + crash_size,
- crash_size, SECTION_SIZE);
- if (start != crash_base) {
+ start = memblock_phys_alloc_range(crash_size, SECTION_SIZE,
+ crash_base, crash_max);
+ if (!start) {
pr_err("crashkernel reservation failed - memory is in use.\n");
return;
}
}
- ret = memblock_reserve(crash_base, crash_size);
- if (ret < 0) {
- pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
- (unsigned long)crash_base);
- return;
- }
-
pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
(unsigned long)(crash_size >> 20),
(unsigned long)(crash_base >> 20),
@@ -1042,7 +1060,7 @@ static void __init reserve_crashkernel(void)
}
#else
static inline void reserve_crashkernel(void) {}
-#endif /* CONFIG_KEXEC */
+#endif /* CONFIG_CRASH_RESERVE */
void __init hyp_mode_check(void)
{
@@ -1061,14 +1079,46 @@ void __init hyp_mode_check(void)
#endif
}
+static void (*__arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
+
+static int arm_restart(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ __arm_pm_restart(action, data);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block arm_restart_nb = {
+ .notifier_call = arm_restart,
+ .priority = 128,
+};
+
void __init setup_arch(char **cmdline_p)
{
- const struct machine_desc *mdesc;
+ const struct machine_desc *mdesc = NULL;
+ void *atags_vaddr = NULL;
+
+ if (__atags_pointer)
+ atags_vaddr = FDT_VIRT_BASE(__atags_pointer);
setup_processor();
- mdesc = setup_machine_fdt(__atags_pointer);
+ if (atags_vaddr) {
+ mdesc = setup_machine_fdt(atags_vaddr);
+ if (mdesc)
+ memblock_reserve(__atags_pointer,
+ fdt_totalsize(atags_vaddr));
+ }
if (!mdesc)
- mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
+ mdesc = setup_machine_tags(atags_vaddr, __machine_arch_type);
+ if (!mdesc) {
+ early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
+ early_print(" r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
+ __atags_pointer);
+ if (__atags_pointer)
+ early_print(" r2[]=%*ph\n", 16, atags_vaddr);
+ dump_machine_table();
+ }
+
machine_desc = mdesc;
machine_name = mdesc->name;
dump_stack_set_arch_desc("%s", mdesc->name);
@@ -1076,13 +1126,10 @@ void __init setup_arch(char **cmdline_p)
if (mdesc->reboot_mode != REBOOT_HARD)
reboot_mode = mdesc->reboot_mode;
- init_mm.start_code = (unsigned long) _text;
- init_mm.end_code = (unsigned long) _etext;
- init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = (unsigned long) _end;
+ setup_initial_init_mm(_text, _etext, _edata, _end);
/* populate cmd_line too for later use, preserving boot_command_line */
- strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
+ strscpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = cmd_line;
early_fixmap_init();
@@ -1095,10 +1142,10 @@ void __init setup_arch(char **cmdline_p)
#endif
setup_dma_zone(mdesc);
xen_early_init();
- efi_init();
+ arm_efi_init();
/*
* Make sure the calculation for lowmem/highmem is set appropriately
- * before reserving/allocating any mmeory
+ * before reserving/allocating any memory
*/
adjust_lowmem_bounds();
arm_memblock_init(mdesc);
@@ -1108,10 +1155,13 @@ void __init setup_arch(char **cmdline_p)
early_ioremap_reset();
paging_init(mdesc);
+ kasan_init();
request_standard_resources(mdesc);
- if (mdesc->restart)
- arm_pm_restart = mdesc->restart;
+ if (mdesc->restart) {
+ __arm_pm_restart = mdesc->restart;
+ register_restart_handler(&arm_restart_nb);
+ }
unflatten_device_tree();
@@ -1135,15 +1185,9 @@ void __init setup_arch(char **cmdline_p)
reserve_crashkernel();
-#ifdef CONFIG_MULTI_IRQ_HANDLER
- handle_arch_irq = mdesc->handle_irq;
-#endif
-
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
- conswitchp = &vga_con;
-#elif defined(CONFIG_DUMMY_CONSOLE)
- conswitchp = &dummy_con;
+ vgacon_register_screen(&vgacon_screen_info);
#endif
#endif
@@ -1151,20 +1195,10 @@ void __init setup_arch(char **cmdline_p)
mdesc->init_early();
}
-
-static int __init topology_init(void)
+bool arch_cpu_is_hotpluggable(int num)
{
- int cpu;
-
- for_each_possible_cpu(cpu) {
- struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
- cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
- register_cpu(&cpuinfo->cpu, cpu);
- }
-
- return 0;
+ return platform_can_hotplug_cpu(num);
}
-subsys_initcall(topology_init);
#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
@@ -1202,6 +1236,12 @@ static const char *hwcap_str[] = {
"vfpd32",
"lpae",
"evtstrm",
+ "fphp",
+ "asimdhp",
+ "asimddp",
+ "asimdfhm",
+ "asimdbf16",
+ "i8mm",
NULL
};
@@ -1211,6 +1251,8 @@ static const char *hwcap2_str[] = {
"sha1",
"sha2",
"crc32",
+ "sb",
+ "ssbs",
NULL
};
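
Userspace observes the new capability bits via AT_HWCAP2; a minimal check, with the bit values assumed from the position of "sb" and "ssbs" after "crc32" in hwcap2_str:

#include <stdio.h>
#include <sys/auxv.h>

#define HWCAP2_SB	(1 << 5)	/* assumed: sixth hwcap2 bit */
#define HWCAP2_SSBS	(1 << 6)	/* assumed: seventh hwcap2 bit */

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	printf("sb=%d ssbs=%d\n",
	       !!(hwcap2 & HWCAP2_SB), !!(hwcap2 & HWCAP2_SSBS));
	return 0;
}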
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 5814298ef0b7..79a6730fa0eb 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -1,64 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/kernel/signal.c
*
* Copyright (C) 1995-2009 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/errno.h>
#include <linux/random.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
-#include <linux/tracehook.h>
+#include <linux/resume_user_mode.h>
#include <linux/uprobes.h>
+#include <linux/syscalls.h>
#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
-#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/vfp.h>
+#include <asm/syscalls.h>
-extern const unsigned long sigreturn_codes[7];
-
-static unsigned long signal_return_offset;
-
-#ifdef CONFIG_CRUNCH
-static int preserve_crunch_context(struct crunch_sigframe __user *frame)
-{
- char kbuf[sizeof(*frame) + 8];
- struct crunch_sigframe *kframe;
-
- /* the crunch context must be 64 bit aligned */
- kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
- kframe->magic = CRUNCH_MAGIC;
- kframe->size = CRUNCH_STORAGE_SIZE;
- crunch_task_copy(current_thread_info(), &kframe->storage);
- return __copy_to_user(frame, kframe, sizeof(*frame));
-}
+#include "signal.h"
-static int restore_crunch_context(char __user **auxp)
-{
- struct crunch_sigframe __user *frame =
- (struct crunch_sigframe __user *)*auxp;
- char kbuf[sizeof(*frame) + 8];
- struct crunch_sigframe *kframe;
+extern const unsigned long sigreturn_codes[17];
- /* the crunch context must be 64 bit aligned */
- kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
- if (__copy_from_user(kframe, frame, sizeof(*frame)))
- return -1;
- if (kframe->magic != CRUNCH_MAGIC ||
- kframe->size != CRUNCH_STORAGE_SIZE)
- return -1;
- *auxp += CRUNCH_STORAGE_SIZE;
- crunch_task_restore(current_thread_info(), &kframe->storage);
- return 0;
-}
-#endif
+static unsigned long signal_return_offset;
#ifdef CONFIG_IWMMXT
@@ -75,8 +41,6 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
kframe->magic = IWMMXT_MAGIC;
kframe->size = IWMMXT_STORAGE_SIZE;
iwmmxt_task_copy(current_thread_info(), &kframe->storage);
-
- err = __copy_to_user(frame, kframe, sizeof(*frame));
} else {
/*
* For bug-compatibility with older kernels, some space
@@ -84,10 +48,14 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
* Set the magic and size appropriately so that properly
* written userspace can skip it reliably:
*/
- __put_user_error(DUMMY_MAGIC, &frame->magic, err);
- __put_user_error(IWMMXT_STORAGE_SIZE, &frame->size, err);
+ *kframe = (struct iwmmxt_sigframe) {
+ .magic = DUMMY_MAGIC,
+ .size = IWMMXT_STORAGE_SIZE,
+ };
}
+ err = __copy_to_user(frame, kframe, sizeof(*kframe));
+
return err;
}
@@ -133,37 +101,34 @@ static int restore_iwmmxt_context(char __user **auxp)
static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{
- const unsigned long magic = VFP_MAGIC;
- const unsigned long size = VFP_STORAGE_SIZE;
+ struct vfp_sigframe kframe;
int err = 0;
- __put_user_error(magic, &frame->magic, err);
- __put_user_error(size, &frame->size, err);
+ memset(&kframe, 0, sizeof(kframe));
+ kframe.magic = VFP_MAGIC;
+ kframe.size = VFP_STORAGE_SIZE;
+ err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc);
if (err)
- return -EFAULT;
+ return err;
- return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
+ return __copy_to_user(frame, &kframe, sizeof(kframe));
}
static int restore_vfp_context(char __user **auxp)
{
- struct vfp_sigframe __user *frame =
- (struct vfp_sigframe __user *)*auxp;
- unsigned long magic;
- unsigned long size;
- int err = 0;
-
- __get_user_error(magic, &frame->magic, err);
- __get_user_error(size, &frame->size, err);
+ struct vfp_sigframe frame;
+ int err;
+ err = __copy_from_user(&frame, *auxp, sizeof(frame));
if (err)
- return -EFAULT;
- if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
+ return err;
+
+ if (frame.magic != VFP_MAGIC || frame.size != VFP_STORAGE_SIZE)
return -EINVAL;
- *auxp += size;
- return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
+ *auxp += sizeof(frame);
+ return vfp_restore_user_hwstate(&frame.ufp, &frame.ufp_exc);
}
#endif
@@ -171,18 +136,10 @@ static int restore_vfp_context(char __user **auxp)
/*
* Do a signal return; undo the signal stack. These are aligned to 64-bit.
*/
-struct sigframe {
- struct ucontext uc;
- unsigned long retcode[2];
-};
-
-struct rt_sigframe {
- struct siginfo info;
- struct sigframe sig;
-};
static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
+ struct sigcontext context;
char __user *aux;
sigset_t set;
int err;
@@ -191,31 +148,30 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
if (err == 0)
set_current_blocked(&set);
- __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
- __get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
- __get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
- __get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
- __get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
- __get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
- __get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
- __get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
- __get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
- __get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
- __get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
- __get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
- __get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
- __get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
- __get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
- __get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
- __get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
+ err |= __copy_from_user(&context, &sf->uc.uc_mcontext, sizeof(context));
+ if (err == 0) {
+ regs->ARM_r0 = context.arm_r0;
+ regs->ARM_r1 = context.arm_r1;
+ regs->ARM_r2 = context.arm_r2;
+ regs->ARM_r3 = context.arm_r3;
+ regs->ARM_r4 = context.arm_r4;
+ regs->ARM_r5 = context.arm_r5;
+ regs->ARM_r6 = context.arm_r6;
+ regs->ARM_r7 = context.arm_r7;
+ regs->ARM_r8 = context.arm_r8;
+ regs->ARM_r9 = context.arm_r9;
+ regs->ARM_r10 = context.arm_r10;
+ regs->ARM_fp = context.arm_fp;
+ regs->ARM_ip = context.arm_ip;
+ regs->ARM_sp = context.arm_sp;
+ regs->ARM_lr = context.arm_lr;
+ regs->ARM_pc = context.arm_pc;
+ regs->ARM_cpsr = context.arm_cpsr;
+ }
err |= !valid_user_regs(regs);
aux = (char __user *) sf->uc.uc_regspace;
-#ifdef CONFIG_CRUNCH
- if (err == 0)
- err |= restore_crunch_context(&aux);
-#endif
#ifdef CONFIG_IWMMXT
if (err == 0)
err |= restore_iwmmxt_context(&aux);
@@ -245,7 +201,7 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs)
frame = (struct sigframe __user *)regs->ARM_sp;
- if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
+ if (!access_ok(frame, sizeof (*frame)))
goto badframe;
if (restore_sigframe(regs, frame))
@@ -254,7 +210,7 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs)
return regs->ARM_r0;
badframe:
- force_sig(SIGSEGV, current);
+ force_sig(SIGSEGV);
return 0;
}
@@ -275,7 +231,7 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
frame = (struct rt_sigframe __user *)regs->ARM_sp;
- if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
+ if (!access_ok(frame, sizeof (*frame)))
goto badframe;
if (restore_sigframe(regs, &frame->sig))
@@ -287,7 +243,7 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
return regs->ARM_r0;
badframe:
- force_sig(SIGSEGV, current);
+ force_sig(SIGSEGV);
return 0;
}
@@ -295,38 +251,39 @@ static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
struct aux_sigframe __user *aux;
+ struct sigcontext context;
int err = 0;
- __put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
- __put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
- __put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
- __put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
- __put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
- __put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
- __put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
- __put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
- __put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
- __put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
- __put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
- __put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
- __put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
- __put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
- __put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
- __put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
- __put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
-
- __put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
- __put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
- __put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
- __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
+ context = (struct sigcontext) {
+ .arm_r0 = regs->ARM_r0,
+ .arm_r1 = regs->ARM_r1,
+ .arm_r2 = regs->ARM_r2,
+ .arm_r3 = regs->ARM_r3,
+ .arm_r4 = regs->ARM_r4,
+ .arm_r5 = regs->ARM_r5,
+ .arm_r6 = regs->ARM_r6,
+ .arm_r7 = regs->ARM_r7,
+ .arm_r8 = regs->ARM_r8,
+ .arm_r9 = regs->ARM_r9,
+ .arm_r10 = regs->ARM_r10,
+ .arm_fp = regs->ARM_fp,
+ .arm_ip = regs->ARM_ip,
+ .arm_sp = regs->ARM_sp,
+ .arm_lr = regs->ARM_lr,
+ .arm_pc = regs->ARM_pc,
+ .arm_cpsr = regs->ARM_cpsr,
+
+ .trap_no = current->thread.trap_no,
+ .error_code = current->thread.error_code,
+ .fault_address = current->thread.address,
+ .oldmask = set->sig[0],
+ };
+
+ err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context));
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
-#ifdef CONFIG_CRUNCH
- if (err == 0)
- err |= preserve_crunch_context(&aux->crunch);
-#endif
#ifdef CONFIG_IWMMXT
if (err == 0)
err |= preserve_iwmmxt_context(&aux->iwmmxt);
@@ -335,7 +292,7 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
if (err == 0)
err |= preserve_vfp_context(&aux->vfp);
#endif
- __put_user_error(0, &aux->end_magic, err);
+ err |= __put_user(0, &aux->end_magic);
return err;
}
@@ -354,7 +311,7 @@ get_sigframe(struct ksignal *ksig, struct pt_regs *regs, int framesize)
/*
* Check that we can actually write to the signal frame.
*/
- if (!access_ok(VERIFY_WRITE, frame, framesize))
+ if (!access_ok(frame, framesize))
frame = NULL;
return frame;
@@ -365,9 +322,20 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
unsigned long __user *rc, void __user *frame)
{
unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler;
+ unsigned long handler_fdpic_GOT = 0;
unsigned long retcode;
- int thumb = 0;
+ unsigned int idx, thumb = 0;
unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
+ bool fdpic = IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) &&
+ (current->personality & FDPIC_FUNCPTRS);
+
+ if (fdpic) {
+ unsigned long __user *fdpic_func_desc =
+ (unsigned long __user *)handler;
+ if (__get_user(handler, &fdpic_func_desc[0]) ||
+ __get_user(handler_fdpic_GOT, &fdpic_func_desc[1]))
+ return 1;
+ }
cpsr |= PSR_ENDSTATE;
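
The two __get_user() calls above read a standard FDPIC function descriptor; sketched as a C struct (the type name is hypothetical, the kernel uses raw word accesses):

struct fdpic_func_desc {
	unsigned long text;	/* real entry point, becomes regs->ARM_pc */
	unsigned long GOT;	/* data base, loaded into regs->ARM_r9 */
};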
@@ -407,9 +375,26 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
if (ksig->ka.sa.sa_flags & SA_RESTORER) {
retcode = (unsigned long)ksig->ka.sa.sa_restorer;
+ if (fdpic) {
+ /*
+ * We need code to load the function descriptor.
+ * That code follows the standard sigreturn code
+ * (6 words), and is made of 3 + 2 words for each
+ * variant. The 4th copied word is the actual FD
+ * address that the assembly code expects.
+ */
+ idx = 6 + thumb * 3;
+ if (ksig->ka.sa.sa_flags & SA_SIGINFO)
+ idx += 5;
+ if (__put_user(sigreturn_codes[idx], rc ) ||
+ __put_user(sigreturn_codes[idx+1], rc+1) ||
+ __put_user(sigreturn_codes[idx+2], rc+2) ||
+ __put_user(retcode, rc+3))
+ return 1;
+ goto rc_finish;
+ }
} else {
- unsigned int idx = thumb << 1;
-
+ idx = thumb << 1;
if (ksig->ka.sa.sa_flags & SA_SIGINFO)
idx += 3;
@@ -421,6 +406,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
__put_user(sigreturn_codes[idx+1], rc+1))
return 1;
+rc_finish:
#ifdef CONFIG_MMU
if (cpsr & MODE32_BIT) {
struct mm_struct *mm = current->mm;
@@ -440,7 +426,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
* the return code written onto the stack.
*/
flush_icache_range((unsigned long)rc,
- (unsigned long)(rc + 2));
+ (unsigned long)(rc + 3));
retcode = ((unsigned long)rc) + thumb;
}
@@ -450,6 +436,8 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
regs->ARM_sp = (unsigned long)frame;
regs->ARM_lr = retcode;
regs->ARM_pc = handler;
+ if (fdpic)
+ regs->ARM_r9 = handler_fdpic_GOT;
regs->ARM_cpsr = cpsr;
return 0;
@@ -467,7 +455,7 @@ setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
/*
* Set uc.uc_flags to a value which sc.trap_no would never have.
*/
- __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);
+ err = __put_user(0x5ac3c35a, &frame->uc.uc_flags);
err |= setup_sigframe(frame, regs, set);
if (err == 0)
@@ -487,8 +475,8 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
- __put_user_error(0, &frame->sig.uc.uc_flags, err);
- __put_user_error(NULL, &frame->sig.uc.uc_link, err);
+ err |= __put_user(0, &frame->sig.uc.uc_flags);
+ err |= __put_user(NULL, &frame->sig.uc.uc_link);
err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
err |= setup_sigframe(&frame->sig, regs, set);
@@ -517,6 +505,11 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
int ret;
/*
+ * Perform fixup for the pre-signal frame.
+ */
+ rseq_signal_deliver(ksig, regs);
+
+ /*
* Set up the stack frame
*/
if (ksig->ka.sa.sa_flags & SA_SIGINFO)
@@ -562,6 +555,7 @@ static int do_signal(struct pt_regs *regs, int syscall)
switch (retval) {
case -ERESTART_RESTARTBLOCK:
restart -= 2;
+ fallthrough;
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
@@ -620,7 +614,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
if (unlikely(!user_mode(regs)))
return 0;
local_irq_enable();
- if (thread_flags & _TIF_SIGPENDING) {
+ if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) {
int restart = do_signal(regs, syscall);
if (unlikely(restart)) {
/*
@@ -634,12 +628,11 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
} else if (thread_flags & _TIF_UPROBE) {
uprobe_notify_resume(regs);
} else {
- clear_thread_flag(TIF_NOTIFY_RESUME);
- tracehook_notify_resume(regs);
+ resume_user_mode_work(regs);
}
}
local_irq_disable();
- thread_flags = current_thread_info()->flags;
+ thread_flags = read_thread_flags();
} while (thread_flags & _TIF_WORK_MASK);
return 0;
}
@@ -658,18 +651,67 @@ struct page *get_signal_page(void)
addr = page_address(page);
+ /* Poison the entire page */
+ memset32(addr, __opcode_to_mem_arm(0xe7fddef1),
+ PAGE_SIZE / sizeof(u32));
+
/* Give the signal return code some randomness */
- offset = 0x200 + (get_random_int() & 0x7fc);
+ offset = 0x200 + (get_random_u16() & 0x7fc);
signal_return_offset = offset;
- /*
- * Copy signal return handlers into the vector page, and
- * set sigreturn to be a pointer to these.
- */
+ /* Copy signal return handlers into the page */
memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
- ptr = (unsigned long)addr + offset;
- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+ /* Flush out all instructions in this page */
+ ptr = (unsigned long)addr;
+ flush_icache_range(ptr, ptr + PAGE_SIZE);
return page;
}
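A quick sketch of the randomization arithmetic above (nothing assumed beyond
the code itself):

    /* get_random_u16() & 0x7fc keeps only bits 2..10, so the result is a
     * multiple of 4 in [0, 0x7fc]; the sigreturn code therefore lands at a
     * word-aligned offset in [0x200, 0x9fc] of the poisoned page. */
    unsigned int offset = 0x200 + (get_random_u16() & 0x7fc);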
+
+#ifdef CONFIG_DEBUG_RSEQ
+asmlinkage void do_rseq_syscall(struct pt_regs *regs)
+{
+ rseq_syscall(regs);
+}
+#endif
+
+/*
+ * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
+ * changes likely come with new fields that should be added below.
+ */
+static_assert(NSIGILL == 11);
+static_assert(NSIGFPE == 15);
+static_assert(NSIGSEGV == 10);
+static_assert(NSIGBUS == 5);
+static_assert(NSIGTRAP == 6);
+static_assert(NSIGCHLD == 6);
+static_assert(NSIGSYS == 2);
+static_assert(sizeof(siginfo_t) == 128);
+static_assert(__alignof__(siginfo_t) == 4);
+static_assert(offsetof(siginfo_t, si_signo) == 0x00);
+static_assert(offsetof(siginfo_t, si_errno) == 0x04);
+static_assert(offsetof(siginfo_t, si_code) == 0x08);
+static_assert(offsetof(siginfo_t, si_pid) == 0x0c);
+static_assert(offsetof(siginfo_t, si_uid) == 0x10);
+static_assert(offsetof(siginfo_t, si_tid) == 0x0c);
+static_assert(offsetof(siginfo_t, si_overrun) == 0x10);
+static_assert(offsetof(siginfo_t, si_status) == 0x14);
+static_assert(offsetof(siginfo_t, si_utime) == 0x18);
+static_assert(offsetof(siginfo_t, si_stime) == 0x1c);
+static_assert(offsetof(siginfo_t, si_value) == 0x14);
+static_assert(offsetof(siginfo_t, si_int) == 0x14);
+static_assert(offsetof(siginfo_t, si_ptr) == 0x14);
+static_assert(offsetof(siginfo_t, si_addr) == 0x0c);
+static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x10);
+static_assert(offsetof(siginfo_t, si_lower) == 0x14);
+static_assert(offsetof(siginfo_t, si_upper) == 0x18);
+static_assert(offsetof(siginfo_t, si_pkey) == 0x14);
+static_assert(offsetof(siginfo_t, si_perf_data) == 0x10);
+static_assert(offsetof(siginfo_t, si_perf_type) == 0x14);
+static_assert(offsetof(siginfo_t, si_perf_flags) == 0x18);
+static_assert(offsetof(siginfo_t, si_band) == 0x0c);
+static_assert(offsetof(siginfo_t, si_fd) == 0x10);
+static_assert(offsetof(siginfo_t, si_call_addr) == 0x0c);
+static_assert(offsetof(siginfo_t, si_syscall) == 0x10);
+static_assert(offsetof(siginfo_t, si_arch) == 0x14);
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
new file mode 100644
index 000000000000..cb076d30ab38
--- /dev/null
+++ b/arch/arm/kernel/signal.h
@@ -0,0 +1,13 @@
+#include <asm/ucontext.h>
+
+struct sigframe {
+ struct ucontext uc;
+ unsigned long retcode[4];
+};
+
+struct rt_sigframe {
+ struct siginfo info;
+ struct sigframe sig;
+};
+
+extern struct page *get_signal_page(void);
diff --git a/arch/arm/kernel/sigreturn_codes.S b/arch/arm/kernel/sigreturn_codes.S
index b84d0cb13682..7540ec51d16c 100644
--- a/arch/arm/kernel/sigreturn_codes.S
+++ b/arch/arm/kernel/sigreturn_codes.S
@@ -1,19 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * sigreturn_codes.S - code snippets for sigreturn syscalls
*
* Created by: Victor Kamensky, 2013-08-13
* Copyright: (C) 2013 Linaro Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
#include <asm/unistd.h>
/*
@@ -51,6 +45,17 @@ ARM_OK( .arm )
.thumb
.endm
+ .macro arm_fdpic_slot n
+ .org sigreturn_codes + 24 + 20 * (\n)
+ARM_OK( .arm )
+ .endm
+
+ .macro thumb_fdpic_slot n
+ .org sigreturn_codes + 24 + 20 * (\n) + 12
+ .thumb
+ .endm
+
+
#if __LINUX_ARM_ARCH__ <= 4
/*
* Note we manually set minimally required arch that supports
@@ -90,13 +95,46 @@ ARM_OK( swi #(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE) )
movs r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
swi #0
+ /* ARM sigreturn restorer FDPIC bounce code snippet */
+ arm_fdpic_slot 0
+ARM_OK( ldr r3, [sp, #SIGFRAME_RC3_OFFSET] )
+ARM_OK( ldmia r3, {r3, r9} )
+#ifdef CONFIG_ARM_THUMB
+ARM_OK( bx r3 )
+#else
+ARM_OK( ret r3 )
+#endif
+
+ /* Thumb sigreturn restorer FDPIC bounce code snippet */
+ thumb_fdpic_slot 0
+ ldr r3, [sp, #SIGFRAME_RC3_OFFSET]
+ ldmia r3, {r2, r3}
+ mov r9, r3
+ bx r2
+
+ /* ARM sigreturn_rt restorer FDPIC bounce code snippet */
+ arm_fdpic_slot 1
+ARM_OK( ldr r3, [sp, #RT_SIGFRAME_RC3_OFFSET] )
+ARM_OK( ldmia r3, {r3, r9} )
+#ifdef CONFIG_ARM_THUMB
+ARM_OK( bx r3 )
+#else
+ARM_OK( ret r3 )
+#endif
+
+ /* Thumb sigreturn_rt restorer FDPIC bounce code snippet */
+ thumb_fdpic_slot 1
+ ldr r3, [sp, #RT_SIGFRAME_RC3_OFFSET]
+ ldmia r3, {r2, r3}
+ mov r9, r3
+ bx r2
+
/*
- * Note on addtional space: setup_return in signal.c
- * algorithm uses two words copy regardless whether
- * it is thumb case or not, so we need additional
- * word after real last entry.
+ * Note on additional space: setup_return in signal.c
+ * always copies the same number of words regardless of whether
+ * it is the Thumb case or not, so we need one additional padding
+ * word after the last entry.
*/
- arm_slot 2
.space 4
.size sigreturn_codes, . - sigreturn_codes
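The .org arithmetic in the fdpic_slot macros above works out as follows
(derived from the macros themselves): the standard snippets occupy 6 words
(24 bytes), and each FDPIC slot is 20 bytes, 3 ARM words followed by 2 Thumb
words, so:

    ARM code of slot n:   sigreturn_codes + 24 + 20*n        (12 bytes)
    Thumb code of slot n: sigreturn_codes + 24 + 20*n + 12   ( 8 bytes)

This matches setup_return()'s word index idx = 6 + thumb * 3 in signal.c
(word 6 is byte 24; 3 words is 12 bytes).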
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 0f6c1000582c..93afd1005b43 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <linux/threads.h>
#include <asm/asm-offsets.h>
@@ -66,13 +67,20 @@ ENTRY(__cpu_suspend)
ldr r4, =cpu_suspend_size
#endif
mov r5, sp @ current virtual SP
+#ifdef CONFIG_VMAP_STACK
+ @ Run the suspend code from the overflow stack so we don't have to rely
+ @ on vmalloc-to-phys conversions anywhere in the arch suspend code.
+ @ The original SP value captured in R5 will be restored on the way out.
+ ldr_this_cpu sp, overflow_stack_ptr, r6, r7
+#endif
add r4, r4, #12 @ Space for pgd, virt sp, phys resume fn
sub sp, sp, r4 @ allocate CPU state on stack
ldr r3, =sleep_save_sp
stmfd sp!, {r0, r1} @ save suspend func arg and pointer
ldr r3, [r3, #SLEEP_SAVE_SP_VIRT]
- ALT_SMP(ldr r0, =mpidr_hash)
+ ALT_SMP(W(nop)) @ don't use adr_l inside ALT_SMP()
ALT_UP_B(1f)
+ adr_l r0, mpidr_hash
/* This ldmia relies on the memory layout of the mpidr_hash struct */
ldmia r0, {r1, r6-r8} @ r1 = mpidr mask (r6,r7,r8) = l[0,1,2] shifts
compute_mpidr_hash r0, r6, r7, r8, r2, r1
@@ -111,7 +119,18 @@ ENTRY(cpu_resume_mmu)
ENDPROC(cpu_resume_mmu)
.popsection
cpu_resume_after_mmu:
+#if defined(CONFIG_VMAP_STACK) && !defined(CONFIG_ARM_LPAE)
+ @ Before using the vmap'ed stack, we have to switch to swapper_pg_dir
+ @ as the ID map does not cover the vmalloc region.
+ mrc p15, 0, ip, c2, c0, 1 @ read TTBR1
+ mcr p15, 0, ip, c2, c0, 0 @ set TTBR0
+ instr_sync
+#endif
bl cpu_init @ restore the und/abt/irq banked regs
+#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
+ mov r0, sp
+ bl kasan_unpoison_task_stack_below
+#endif
mov r0, #0 @ return zero on success
ldmfd sp!, {r4 - r11, pc}
ENDPROC(cpu_resume_after_mmu)
@@ -119,6 +138,14 @@ ENDPROC(cpu_resume_after_mmu)
.text
.align
+#ifdef CONFIG_MCPM
+ .arm
+THUMB( .thumb )
+ENTRY(cpu_resume_no_hyp)
+ARM_BE8(setend be) @ ensure we are in BE mode
+ b no_hyp
+#endif
+
#ifdef CONFIG_MMU
.arm
ENTRY(cpu_resume_arm)
@@ -134,12 +161,12 @@ ARM_BE8(setend be) @ ensure we are in BE mode
bl __hyp_stub_install_secondary
#endif
safe_svcmode_maskall r1
+no_hyp:
mov r1, #0
ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
ALT_UP_B(1f)
- adr r2, mpidr_hash_ptr
- ldr r3, [r2]
- add r2, r2, r3 @ r2 = struct mpidr_hash phys address
+ adr_l r2, mpidr_hash @ r2 = struct mpidr_hash phys address
+
/*
* This ldmia relies on the memory layout of the mpidr_hash
* struct mpidr_hash.
@@ -147,10 +174,7 @@ ARM_BE8(setend be) @ ensure we are in BE mode
ldmia r2, { r3-r6 } @ r3 = mpidr mask (r4,r5,r6) = l[0,1,2] shifts
compute_mpidr_hash r1, r4, r5, r6, r0, r3
1:
- adr r0, _sleep_save_sp
- ldr r2, [r0]
- add r0, r0, r2
- ldr r0, [r0, #SLEEP_SAVE_SP_PHYS]
+ ldr_l r0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
ldr r0, [r0, r1, lsl #2]
@ load phys pgd, stack, resume fn
@@ -163,14 +187,12 @@ ENDPROC(cpu_resume)
#ifdef CONFIG_MMU
ENDPROC(cpu_resume_arm)
#endif
-
- .align 2
-_sleep_save_sp:
- .long sleep_save_sp - .
-mpidr_hash_ptr:
- .long mpidr_hash - . @ mpidr_hash struct offset
+#ifdef CONFIG_MCPM
+ENDPROC(cpu_resume_no_hyp)
+#endif
.data
+ .align 2
.type sleep_save_sp, #object
ENTRY(sleep_save_sp)
.space SLEEP_SAVE_SP_SZ @ struct sleep_save_sp
diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S
index e5d43066b889..931df62a7831 100644
--- a/arch/arm/kernel/smccc-call.S
+++ b/arch/arm/kernel/smccc-call.S
@@ -1,18 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015, Linaro Limited
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/linkage.h>
+#include <linux/arm-smccc.h>
+#include <asm/asm-offsets.h>
#include <asm/opcodes-sec.h>
#include <asm/opcodes-virt.h>
#include <asm/unwind.h>
@@ -36,7 +29,14 @@ UNWIND( .fnstart)
UNWIND( .save {r4-r7})
ldm r12, {r4-r7}
\instr
- pop {r4-r7}
+ ldr r4, [sp, #36]
+ cmp r4, #0
+ beq 1f // No quirk structure
+ ldr r5, [r4, #ARM_SMCCC_QUIRK_ID_OFFS]
+ cmp r5, #ARM_SMCCC_QUIRK_QCOM_A6
+ bne 1f // No quirk present
+ str r6, [r4, #ARM_SMCCC_QUIRK_STATE_OFFS]
+1: pop {r4-r7}
ldr r12, [sp, #(4 * 4)]
stm r12, {r0-r3}
bx lr
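The pointer loaded from the stack above is the optional quirk argument of the
SMCCC calls. For reference, a sketch of the structure involved (per
include/linux/arm-smccc.h, slightly simplified):

    struct arm_smccc_quirk {
        int id;                 /* e.g. ARM_SMCCC_QUIRK_QCOM_A6 */
        union {
            unsigned long a6;   /* state written back by the asm above */
        } state;
    };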
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index c9a0a5299827..50999886a8b5 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/kernel/smp.c
*
* Copyright (C) 2002 ARM Limited, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/delay.h>
@@ -29,8 +26,10 @@
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>
+#include <linux/kernel_stat.h>
#include <linux/atomic.h>
+#include <asm/bugs.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
@@ -39,8 +38,7 @@
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
+#include <asm/procinfo.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
@@ -50,7 +48,6 @@
#include <asm/mach/arch.h>
#include <asm/mpu.h>
-#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>
/*
@@ -60,12 +57,6 @@
*/
struct secondary_data secondary_data;
-/*
- * control for which core is the next to come out of the secondary
- * boot "holding pen"
- */
-volatile int pen_release = -1;
-
enum ipi_msg_type {
IPI_WAKEUP,
IPI_TIMER,
@@ -74,14 +65,26 @@ enum ipi_msg_type {
IPI_CPU_STOP,
IPI_IRQ_WORK,
IPI_COMPLETION,
- IPI_CPU_BACKTRACE,
+ NR_IPI,
+ /*
+ * CPU_BACKTRACE is special and not included in NR_IPI
+ * or traceable with trace_ipi_*
+ */
+ IPI_CPU_BACKTRACE = NR_IPI,
/*
* SGI8-15 can be reserved by secure firmware, and thus may
* not be usable by the kernel. Please keep the above limited
* to at most 8 entries.
*/
+ MAX_IPI
};
+static int ipi_irq_base __read_mostly;
+static int nr_ipi __read_mostly = NR_IPI;
+static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly;
+
+static void ipi_setup(int cpu);
+
static DECLARE_COMPLETION(cpu_running);
static struct smp_operations smp_ops __ro_after_init;
@@ -101,6 +104,30 @@ static unsigned long get_arch_pgd(pgd_t *pgd)
#endif
}
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+ if (!cpu_vtable[cpu])
+ cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
+
+ return cpu_vtable[cpu] ? 0 : -ENOMEM;
+}
+
+static void secondary_biglittle_init(void)
+{
+ init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
+}
+#else
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+ return 0;
+}
+
+static void secondary_biglittle_init(void)
+{
+}
+#endif
+
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
int ret;
@@ -108,19 +135,24 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
if (!smp_ops.smp_boot_secondary)
return -ENOSYS;
+ ret = secondary_biglittle_prepare(cpu);
+ if (ret)
+ return ret;
+
/*
* We need to tell the secondary core where to find
* its stack and the page tables.
*/
secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
- secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr;
+ secondary_data.mpu_rgn_info = &mpu_rgn_info;
#endif
#ifdef CONFIG_MMU
secondary_data.pgdir = virt_to_phys(idmap_pgd);
secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
+ secondary_data.task = idle;
sync_cache_w(&secondary_data);
/*
@@ -203,6 +235,17 @@ int platform_can_hotplug_cpu(unsigned int cpu)
return cpu != 0;
}
+static void ipi_teardown(int cpu)
+{
+ int i;
+
+ if (WARN_ON_ONCE(!ipi_irq_base))
+ return;
+
+ for (i = 0; i < nr_ipi; i++)
+ disable_percpu_irq(ipi_irq_base + i);
+}
+
/*
* __cpu_disable runs on the processor to be shutdown.
*/
@@ -215,16 +258,21 @@ int __cpu_disable(void)
if (ret)
return ret;
+#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
+ remove_cpu_topology(cpu);
+#endif
+
/*
* Take this CPU offline. Once we clear this, we can't return,
* and we must not schedule until we're ready to give up the cpu.
*/
set_cpu_online(cpu, false);
+ ipi_teardown(cpu);
/*
* OK - migrate IRQs away from this CPU
*/
- migrate_irqs();
+ irq_migrate_all_off_this_cpu();
/*
* Flush user cache and TLB mappings, and then remove this CPU
@@ -236,25 +284,18 @@ int __cpu_disable(void)
flush_cache_louis();
local_flush_tlb_all();
- clear_tasks_mm_cpumask(cpu);
-
return 0;
}
-static DECLARE_COMPLETION(cpu_died);
-
/*
- * called on the thread which is asking for a CPU to be shutdown -
- * waits until shutdown has completed, or it is timed out.
+ * Called on the thread which asked for a CPU to be shut down, after the
+ * shutdown has completed.
*/
-void __cpu_die(unsigned int cpu)
+void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
{
- if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
- pr_err("CPU%u: cpu didn't die\n", cpu);
- return;
- }
pr_debug("CPU%u: shutdown\n", cpu);
+ clear_tasks_mm_cpumask(cpu);
/*
* platform_cpu_kill() is generally expected to do the powering off
* and/or cutting of clocks to the dying CPU. Optionally, this may
@@ -274,7 +315,7 @@ void __cpu_die(unsigned int cpu)
* of the other hotplug-cpu capable cores, so presumably coming
* out of idle fixes this.
*/
-void arch_cpu_idle_dead(void)
+void __noreturn arch_cpu_idle_dead(void)
{
unsigned int cpu = smp_processor_id();
@@ -291,11 +332,11 @@ void arch_cpu_idle_dead(void)
flush_cache_louis();
/*
- * Tell __cpu_die() that this CPU is now safe to dispose of. Once
- * this returns, power and/or clocks can be removed at any point
- * from this CPU and its cache by platform_cpu_kill().
+ * Tell cpuhp_bp_sync_dead() that this CPU is now safe to dispose
+ * of. Once this returns, power and/or clocks can be removed at
+ * any point from this CPU and its cache by platform_cpu_kill().
*/
- complete(&cpu_died);
+ cpuhp_ap_report_dead();
/*
* Ensure that the cache lines associated with that completion are
@@ -330,9 +371,14 @@ void arch_cpu_idle_dead(void)
*/
__asm__("mov sp, %0\n"
" mov fp, #0\n"
+ " mov r0, %1\n"
" b secondary_start_kernel"
:
- : "r" (task_stack_page(current) + THREAD_SIZE - 8));
+ : "r" (task_stack_page(current) + THREAD_SIZE - 8),
+ "r" (current)
+ : "r0");
+
+ unreachable();
}
#endif /* CONFIG_HOTPLUG_CPU */
@@ -348,17 +394,28 @@ static void smp_store_cpu_info(unsigned int cpuid)
cpu_info->cpuid = read_cpuid_id();
store_cpu_topology(cpuid);
+ check_cpu_icache_size(cpuid);
+}
+
+static void set_current(struct task_struct *cur)
+{
+ /* Set TPIDRURO */
+ asm("mcr p15, 0, %0, c13, c0, 3" :: "r"(cur) : "memory");
}
/*
* This is the secondary CPU boot entry. We're using this CPUs
* idle thread stack, but a set of temporary page tables.
*/
-asmlinkage void secondary_start_kernel(void)
+asmlinkage void secondary_start_kernel(struct task_struct *task)
{
struct mm_struct *mm = &init_mm;
unsigned int cpu;
+ set_current(task);
+
+ secondary_biglittle_init();
+
/*
* The identity mapping is uncached (strongly ordered), so
* switch away from it before attempting any exclusive accesses.
@@ -379,9 +436,11 @@ asmlinkage void secondary_start_kernel(void)
cpu_init();
+#ifndef CONFIG_MMU
+ setup_vectors_base();
+#endif
pr_debug("CPU%u: Booted secondary processor\n", cpu);
- preempt_disable();
trace_hardirqs_off();
/*
@@ -392,6 +451,8 @@ asmlinkage void secondary_start_kernel(void)
notify_cpu_starting(cpu);
+ ipi_setup(cpu);
+
calibrate_delay();
smp_store_cpu_info(cpu);
@@ -402,6 +463,9 @@ asmlinkage void secondary_start_kernel(void)
* before we continue - which happens after __cpu_up returns.
*/
set_cpu_online(cpu, true);
+
+ check_other_bugs();
+
complete(&cpu_running);
local_irq_enable();
@@ -467,57 +531,36 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}
}
-static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
-
-void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
-{
- if (!__smp_cross_call)
- __smp_cross_call = fn;
-}
-
static const char *ipi_types[NR_IPI] __tracepoint_string = {
-#define S(x,s) [x] = s
- S(IPI_WAKEUP, "CPU wakeup interrupts"),
- S(IPI_TIMER, "Timer broadcast interrupts"),
- S(IPI_RESCHEDULE, "Rescheduling interrupts"),
- S(IPI_CALL_FUNC, "Function call interrupts"),
- S(IPI_CPU_STOP, "CPU stop interrupts"),
- S(IPI_IRQ_WORK, "IRQ work interrupts"),
- S(IPI_COMPLETION, "completion interrupts"),
+ [IPI_WAKEUP] = "CPU wakeup interrupts",
+ [IPI_TIMER] = "Timer broadcast interrupts",
+ [IPI_RESCHEDULE] = "Rescheduling interrupts",
+ [IPI_CALL_FUNC] = "Function call interrupts",
+ [IPI_CPU_STOP] = "CPU stop interrupts",
+ [IPI_IRQ_WORK] = "IRQ work interrupts",
+ [IPI_COMPLETION] = "completion interrupts",
};
-static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
-{
- trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
- __smp_cross_call(target, ipinr);
-}
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
void show_ipi_list(struct seq_file *p, int prec)
{
unsigned int cpu, i;
for (i = 0; i < NR_IPI; i++) {
- seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
+ if (!ipi_desc[i])
+ continue;
+
+ seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
+ prec >= 4 ? " " : "");
for_each_online_cpu(cpu)
- seq_printf(p, "%10u ",
- __get_irq_stat(cpu, ipi_irqs[i]));
+ seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
seq_printf(p, " %s\n", ipi_types[i]);
}
}
-u64 smp_irq_stat_cpu(unsigned int cpu)
-{
- u64 sum = 0;
- int i;
-
- for (i = 0; i < NR_IPI; i++)
- sum += __get_irq_stat(cpu, ipi_irqs[i]);
-
- return sum;
-}
-
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
smp_cross_call(mask, IPI_CALL_FUNC);
@@ -555,6 +598,8 @@ static DEFINE_RAW_SPINLOCK(stop_lock);
*/
static void ipi_cpu_stop(unsigned int cpu)
{
+ local_fiq_disable();
+
if (system_state <= SYSTEM_RUNNING) {
raw_spin_lock(&stop_lock);
pr_crit("CPU%u: stopping\n", cpu);
@@ -564,11 +609,10 @@ static void ipi_cpu_stop(unsigned int cpu)
set_cpu_online(cpu, false);
- local_fiq_disable();
- local_irq_disable();
-
- while (1)
+ while (1) {
cpu_relax();
+ wfe();
+ }
}
static DEFINE_PER_CPU(struct completion *, cpu_completion);
@@ -587,20 +631,12 @@ static void ipi_complete(unsigned int cpu)
/*
* Main handler for inter-processor interrupts
*/
-asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
-{
- handle_IPI(ipinr, regs);
-}
-
-void handle_IPI(int ipinr, struct pt_regs *regs)
+static void do_handle_IPI(int ipinr)
{
unsigned int cpu = smp_processor_id();
- struct pt_regs *old_regs = set_irq_regs(regs);
- if ((unsigned)ipinr < NR_IPI) {
- trace_ipi_entry_rcuidle(ipi_types[ipinr]);
- __inc_irq_stat(cpu, ipi_irqs[ipinr]);
- }
+ if ((unsigned)ipinr < NR_IPI)
+ trace_ipi_entry(ipi_types[ipinr]);
switch (ipinr) {
case IPI_WAKEUP:
@@ -608,9 +644,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
case IPI_TIMER:
- irq_enter();
tick_receive_broadcast();
- irq_exit();
break;
#endif
@@ -619,37 +653,27 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
break;
case IPI_CALL_FUNC:
- irq_enter();
generic_smp_call_function_interrupt();
- irq_exit();
break;
case IPI_CPU_STOP:
- irq_enter();
ipi_cpu_stop(cpu);
- irq_exit();
break;
#ifdef CONFIG_IRQ_WORK
case IPI_IRQ_WORK:
- irq_enter();
irq_work_run();
- irq_exit();
break;
#endif
case IPI_COMPLETION:
- irq_enter();
ipi_complete(cpu);
- irq_exit();
break;
case IPI_CPU_BACKTRACE:
- printk_nmi_enter();
- irq_enter();
- nmi_cpu_backtrace(regs);
- irq_exit();
- printk_nmi_exit();
+ printk_deferred_enter();
+ nmi_cpu_backtrace(get_irq_regs());
+ printk_deferred_exit();
break;
default:
@@ -659,11 +683,69 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
}
if ((unsigned)ipinr < NR_IPI)
- trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+ trace_ipi_exit(ipi_types[ipinr]);
+}
+
+/* Legacy version, should go away once all irqchips have been converted */
+void handle_IPI(int ipinr, struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
+ do_handle_IPI(ipinr);
+ irq_exit();
+
set_irq_regs(old_regs);
}
-void smp_send_reschedule(int cpu)
+static irqreturn_t ipi_handler(int irq, void *data)
+{
+ do_handle_IPI(irq - ipi_irq_base);
+ return IRQ_HANDLED;
+}
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+ trace_ipi_raise(target, ipi_types[ipinr]);
+ __ipi_send_mask(ipi_desc[ipinr], target);
+}
+
+static void ipi_setup(int cpu)
+{
+ int i;
+
+ if (WARN_ON_ONCE(!ipi_irq_base))
+ return;
+
+ for (i = 0; i < nr_ipi; i++)
+ enable_percpu_irq(ipi_irq_base + i, 0);
+}
+
+void __init set_smp_ipi_range(int ipi_base, int n)
+{
+ int i;
+
+ WARN_ON(n < MAX_IPI);
+ nr_ipi = min(n, MAX_IPI);
+
+ for (i = 0; i < nr_ipi; i++) {
+ int err;
+
+ err = request_percpu_irq(ipi_base + i, ipi_handler,
+ "IPI", &irq_stat);
+ WARN_ON(err);
+
+ ipi_desc[i] = irq_to_desc(ipi_base + i);
+ irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
+ }
+
+ ipi_irq_base = ipi_base;
+
+ /* Setup the boot CPU immediately */
+ ipi_setup(smp_processor_id());
+}
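For context, set_smp_ipi_range() is expected to be called once by the irqchip
driver after it has allocated a block of per-CPU IRQs for its SGIs. A rough
sketch of the caller side (hypothetical fragment; the exact allocation call
varies per driver):

    /* hypothetical irqchip init fragment */
    int base = irq_domain_alloc_irqs(sgi_domain, 8, NUMA_NO_NODE, &sgi_fwspec);
    if (base > 0)
        set_smp_ipi_range(base, 8);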
+
+void arch_smp_send_reschedule(int cpu)
{
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
@@ -687,12 +769,19 @@ void smp_send_stop(void)
pr_warn("SMP: failed to stop secondary CPUs\n");
}
-/*
- * not supported here
+/* In case panic() is called at the same time on two CPUs (say CPU1 and
+ * CPU2), and CPU1 calls panic_smp_self_stop() before CPU2 issues
+ * crash_smp_send_stop(), CPU1 can't receive the IPI IRQs from CPU2 and
+ * stays marked online forever, so kdump fails. Hence this override of
+ * panic_smp_self_stop(), which adds set_cpu_online(smp_processor_id(), false).
*/
-int setup_profiling_timer(unsigned int multiplier)
+void __noreturn panic_smp_self_stop(void)
{
- return -EINVAL;
+ pr_debug("CPU %u will stop doing anything useful since another CPU has paniced\n",
+ smp_processor_id());
+ set_cpu_online(smp_processor_id(), false);
+ while (1)
+ cpu_relax();
}
#ifdef CONFIG_CPU_FREQ
@@ -706,15 +795,20 @@ static int cpufreq_callback(struct notifier_block *nb,
unsigned long val, void *data)
{
struct cpufreq_freqs *freq = data;
- int cpu = freq->cpu;
+ struct cpumask *cpus = freq->policy->cpus;
+ int cpu, first = cpumask_first(cpus);
+ unsigned int lpj;
if (freq->flags & CPUFREQ_CONST_LOOPS)
return NOTIFY_OK;
- if (!per_cpu(l_p_j_ref, cpu)) {
- per_cpu(l_p_j_ref, cpu) =
- per_cpu(cpu_data, cpu).loops_per_jiffy;
- per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+ if (!per_cpu(l_p_j_ref, first)) {
+ for_each_cpu(cpu, cpus) {
+ per_cpu(l_p_j_ref, cpu) =
+ per_cpu(cpu_data, cpu).loops_per_jiffy;
+ per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+ }
+
if (!global_l_p_j_ref) {
global_l_p_j_ref = loops_per_jiffy;
global_l_p_j_ref_freq = freq->old;
@@ -726,10 +820,11 @@ static int cpufreq_callback(struct notifier_block *nb,
loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
global_l_p_j_ref_freq,
freq->new);
- per_cpu(cpu_data, cpu).loops_per_jiffy =
- cpufreq_scale(per_cpu(l_p_j_ref, cpu),
- per_cpu(l_p_j_ref_freq, cpu),
- freq->new);
+
+ lpj = cpufreq_scale(per_cpu(l_p_j_ref, first),
+ per_cpu(l_p_j_ref_freq, first), freq->new);
+ for_each_cpu(cpu, cpus)
+ per_cpu(cpu_data, cpu).loops_per_jiffy = lpj;
}
return NOTIFY_OK;
}
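The scaling formula itself is unchanged; cpufreq_scale(ref, ref_freq, new_freq)
returns ref * new_freq / ref_freq. A worked example of the per-policy value
computed above (illustrative numbers, frequencies in kHz):

    /* reference lpj of 4,000,000 captured at 1 GHz */
    lpj = cpufreq_scale(4000000, 1000000, 500000);  /* -> 2,000,000 at 500 MHz */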
@@ -749,10 +844,10 @@ core_initcall(register_cpufreq_notifier);
static void raise_nmi(cpumask_t *mask)
{
- smp_cross_call(mask, IPI_CPU_BACKTRACE);
+ __ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
}
-void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
{
- nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_nmi);
+ nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_nmi);
}
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 72f9241ad5db..6de47fb3b828 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/kernel/smp_scu.c
*
* Copyright (C) 2002 ARM Ltd.
* All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/io.h>
@@ -21,6 +18,7 @@
#define SCU_STANDBY_ENABLE (1 << 5)
#define SCU_CONFIG 0x04
#define SCU_CPU_STATUS 0x08
+#define SCU_CPU_STATUS_MASK GENMASK(1, 0)
#define SCU_INVALIDATE 0x0c
#define SCU_FPGA_REVISION 0x10
@@ -72,6 +70,24 @@ void scu_enable(void __iomem *scu_base)
}
#endif
+static int scu_set_power_mode_internal(void __iomem *scu_base,
+ unsigned int logical_cpu,
+ unsigned int mode)
+{
+ unsigned int val;
+ int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(logical_cpu), 0);
+
+ if (mode > 3 || mode == 1 || cpu > 3)
+ return -EINVAL;
+
+ val = readb_relaxed(scu_base + SCU_CPU_STATUS + cpu);
+ val &= ~SCU_CPU_STATUS_MASK;
+ val |= mode;
+ writeb_relaxed(val, scu_base + SCU_CPU_STATUS + cpu);
+
+ return 0;
+}
+
/*
* Set the executing CPUs power mode as defined. This will be in
* preparation for it executing a WFI instruction.
@@ -82,15 +98,27 @@ void scu_enable(void __iomem *scu_base)
*/
int scu_power_mode(void __iomem *scu_base, unsigned int mode)
{
+ return scu_set_power_mode_internal(scu_base, smp_processor_id(), mode);
+}
+
+/*
+ * Set the given (logical) CPU's power mode to SCU_PM_NORMAL.
+ */
+int scu_cpu_power_enable(void __iomem *scu_base, unsigned int cpu)
+{
+ return scu_set_power_mode_internal(scu_base, cpu, SCU_PM_NORMAL);
+}
+
+int scu_get_cpu_power_mode(void __iomem *scu_base, unsigned int logical_cpu)
+{
unsigned int val;
- int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
+ int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(logical_cpu), 0);
- if (mode > 3 || mode == 1 || cpu > 3)
+ if (cpu > 3)
return -EINVAL;
- val = readb_relaxed(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
- val |= mode;
- writeb_relaxed(val, scu_base + SCU_CPU_STATUS + cpu);
+ val = readb_relaxed(scu_base + SCU_CPU_STATUS + cpu);
+ val &= SCU_CPU_STATUS_MASK;
- return 0;
+ return val;
}
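Usage sketch (mode encodings per asm/smp_scu.h: 0 = SCU_PM_NORMAL, 1 is
reserved/invalid, 2 = SCU_PM_DORMANT, 3 = SCU_PM_POWEROFF): a platform's
hotplug path would typically do something like

    scu_power_mode(scu_base, SCU_PM_POWEROFF);  /* current CPU only */
    cpu_do_idle();                              /* executes WFI */

on the dying CPU, and call scu_cpu_power_enable(scu_base, cpu) from the boot
CPU's side before waking it again.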
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 9af0701f7094..d4908b3736d8 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/kernel/smp_tlb.c
*
* Copyright (C) 2002 ARM Limited, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/preempt.h>
#include <linux/smp.h>
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index b30eafeef096..42a3706e16a6 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/kernel/smp_twd.c
*
* Copyright (C) 2002 ARM Ltd.
* All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/kernel.h>
@@ -96,12 +93,9 @@ static void twd_timer_stop(void)
{
struct clock_event_device *clk = raw_cpu_ptr(twd_evt);
- twd_shutdown(clk);
disable_percpu_irq(clk->irq);
}
-#ifdef CONFIG_COMMON_CLK
-
/*
* Updates clockevent frequency when the cpu frequency changes.
* Called on the cpu that is changing frequency with interrupts disabled.
@@ -143,54 +137,6 @@ static int twd_clk_init(void)
}
core_initcall(twd_clk_init);
-#elif defined (CONFIG_CPU_FREQ)
-
-#include <linux/cpufreq.h>
-
-/*
- * Updates clockevent frequency when the cpu frequency changes.
- * Called on the cpu that is changing frequency with interrupts disabled.
- */
-static void twd_update_frequency(void *data)
-{
- twd_timer_rate = clk_get_rate(twd_clk);
-
- clockevents_update_freq(raw_cpu_ptr(twd_evt), twd_timer_rate);
-}
-
-static int twd_cpufreq_transition(struct notifier_block *nb,
- unsigned long state, void *data)
-{
- struct cpufreq_freqs *freqs = data;
-
- /*
- * The twd clock events must be reprogrammed to account for the new
- * frequency. The timer is local to a cpu, so cross-call to the
- * changing cpu.
- */
- if (state == CPUFREQ_POSTCHANGE)
- smp_call_function_single(freqs->cpu, twd_update_frequency,
- NULL, 1);
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block twd_cpufreq_nb = {
- .notifier_call = twd_cpufreq_transition,
-};
-
-static int twd_cpufreq_init(void)
-{
- if (twd_evt && raw_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
- return cpufreq_register_notifier(&twd_cpufreq_nb,
- CPUFREQ_TRANSITION_NOTIFIER);
-
- return 0;
-}
-core_initcall(twd_cpufreq_init);
-
-#endif
-
static void twd_calibrate_rate(void)
{
unsigned long count;
@@ -366,21 +312,6 @@ out_free:
return err;
}
-int __init twd_local_timer_register(struct twd_local_timer *tlt)
-{
- if (twd_base || twd_evt)
- return -EBUSY;
-
- twd_ppi = tlt->res[1].start;
-
- twd_base = ioremap(tlt->res[0].start, resource_size(&tlt->res[0]));
- if (!twd_base)
- return -ENOMEM;
-
- return twd_local_timer_common_register(NULL);
-}
-
-#ifdef CONFIG_OF
static int __init twd_local_timer_of_register(struct device_node *np)
{
int err;
@@ -406,4 +337,3 @@ out:
TIMER_OF_DECLARE(arm_twd_a9, "arm,cortex-a9-twd-timer", twd_local_timer_of_register);
TIMER_OF_DECLARE(arm_twd_a5, "arm,cortex-a5-twd-timer", twd_local_timer_of_register);
TIMER_OF_DECLARE(arm_twd_11mp, "arm,arm11mp-twd-timer", twd_local_timer_of_register);
-#endif
diff --git a/arch/arm/kernel/spectre.c b/arch/arm/kernel/spectre.c
new file mode 100644
index 000000000000..0dcefc36fb7a
--- /dev/null
+++ b/arch/arm/kernel/spectre.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/bpf.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+
+#include <asm/spectre.h>
+
+static bool _unprivileged_ebpf_enabled(void)
+{
+#ifdef CONFIG_BPF_SYSCALL
+ return !sysctl_unprivileged_bpf_disabled;
+#else
+ return false;
+#endif
+}
+
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+static unsigned int spectre_v2_state;
+static unsigned int spectre_v2_methods;
+
+void spectre_v2_update_state(unsigned int state, unsigned int method)
+{
+ if (state > spectre_v2_state)
+ spectre_v2_state = state;
+ spectre_v2_methods |= method;
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ const char *method;
+
+ if (spectre_v2_state == SPECTRE_UNAFFECTED)
+ return sprintf(buf, "%s\n", "Not affected");
+
+ if (spectre_v2_state != SPECTRE_MITIGATED)
+ return sprintf(buf, "%s\n", "Vulnerable");
+
+ if (_unprivileged_ebpf_enabled())
+ return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");
+
+ switch (spectre_v2_methods) {
+ case SPECTRE_V2_METHOD_BPIALL:
+ method = "Branch predictor hardening";
+ break;
+
+ case SPECTRE_V2_METHOD_ICIALLU:
+ method = "I-cache invalidation";
+ break;
+
+ case SPECTRE_V2_METHOD_SMC:
+ case SPECTRE_V2_METHOD_HVC:
+ method = "Firmware call";
+ break;
+
+ case SPECTRE_V2_METHOD_LOOP8:
+ method = "History overwrite";
+ break;
+
+ default:
+ method = "Multiple mitigations";
+ break;
+ }
+
+ return sprintf(buf, "Mitigation: %s\n", method);
+}
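These two show functions back the generic sysfs vulnerability files; for
example, reading the spectre_v2 entry on a mitigated system produces output
along these lines (illustrative):

    $ cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
    Mitigation: Branch predictor hardening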
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 3a2fa203637a..620aa82e3bdd 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -1,11 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
+#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/stacktrace.h>
+#include <asm/sections.h>
#include <asm/stacktrace.h>
#include <asm/traps.h>
+#include "reboot.h"
+
#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
/*
* Unwind the current stack frame and store the new register values in the
@@ -20,38 +25,114 @@
* A simple function epilogue looks like this:
* ldm sp, {fp, sp, pc}
*
+ * When compiled with clang, pc and sp are not pushed. A simple function
+ * prologue looks like this when built with clang:
+ *
+ * stmdb {..., fp, lr}
+ * add fp, sp, #x
+ * sub sp, sp, #y
+ *
+ * A simple function epilogue looks like this when built with clang:
+ *
+ * sub sp, fp, #x
+ * ldm {..., fp, pc}
+ *
+ *
* Note that with framepointer enabled, even the leaf functions have the same
* prologue and epilogue, therefore we can ignore the LR value in this case.
*/
-int notrace unwind_frame(struct stackframe *frame)
+
+extern unsigned long call_with_stack_end;
+
+static int frame_pointer_check(struct stackframe *frame)
{
unsigned long high, low;
unsigned long fp = frame->fp;
+ unsigned long pc = frame->pc;
+
+ /*
+ * call_with_stack() is the only place we allow SP to jump from one
+ * stack to another, with FP and SP pointing to different stacks,
+ * skipping the FP boundary check at this point.
+ */
+ if (pc >= (unsigned long)&call_with_stack &&
+ pc < (unsigned long)&call_with_stack_end)
+ return 0;
/* only go to a higher address on the stack */
low = frame->sp;
high = ALIGN(low, THREAD_SIZE);
/* check current frame pointer is within bounds */
+#ifdef CONFIG_CC_IS_CLANG
+ if (fp < low + 4 || fp > high - 4)
+ return -EINVAL;
+#else
if (fp < low + 12 || fp > high - 4)
return -EINVAL;
+#endif
+
+ return 0;
+}
+
+int notrace unwind_frame(struct stackframe *frame)
+{
+ unsigned long fp = frame->fp;
+
+ if (frame_pointer_check(frame))
+ return -EINVAL;
+
+ /*
+ * When we unwind through an exception stack, include the saved PC
+ * value into the stack trace.
+ */
+ if (frame->ex_frame) {
+ struct pt_regs *regs = (struct pt_regs *)frame->sp;
+
+ /*
+ * We check that 'regs + sizeof(struct pt_regs)' (that is,
+ * &regs[1]) does not exceed the bottom of the stack to avoid
+ * accessing data outside the task's stack. This may happen
+ * when frame->ex_frame is a false positive.
+ */
+ if ((unsigned long)&regs[1] > ALIGN(frame->sp, THREAD_SIZE))
+ return -EINVAL;
+
+ frame->pc = regs->ARM_pc;
+ frame->ex_frame = false;
+ return 0;
+ }
/* restore the registers from the stack frame */
- frame->fp = *(unsigned long *)(fp - 12);
- frame->sp = *(unsigned long *)(fp - 8);
- frame->pc = *(unsigned long *)(fp - 4);
+#ifdef CONFIG_CC_IS_CLANG
+ frame->sp = frame->fp;
+ frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
+ frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 4));
+#else
+ frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 12));
+ frame->sp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 8));
+ frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 4));
+#endif
+#ifdef CONFIG_KRETPROBES
+ if (is_kretprobe_trampoline(frame->pc))
+ frame->pc = kretprobe_find_ret_addr(frame->tsk,
+ (void *)frame->fp, &frame->kr_cur);
+#endif
+
+ if (in_entry_text(frame->pc))
+ frame->ex_frame = true;
return 0;
}
#endif
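To make the two frame layouts concrete, a minimal sketch of one unwind step per
compiler (illustrative only; the real code above additionally bounds-checks fp
and uses READ_ONCE_NOCHECK):

    /* GCC/APCS: fp points just past the saved {fp, sp, pc} triple */
    next_fp = *(unsigned long *)(fp - 12);
    next_sp = *(unsigned long *)(fp - 8);
    next_pc = *(unsigned long *)(fp - 4);

    /* Clang: fp points at the saved {fp, lr} pair, and the old fp doubles
     * as the new sp since sp itself is never saved */
    next_sp = fp;
    next_fp = *(unsigned long *)(fp);
    next_pc = *(unsigned long *)(fp + 4);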
void notrace walk_stackframe(struct stackframe *frame,
- int (*fn)(struct stackframe *, void *), void *data)
+ bool (*fn)(void *, unsigned long), void *data)
{
while (1) {
int ret;
- if (fn(frame, data))
+ if (!fn(data, frame->pc))
break;
ret = unwind_frame(frame);
if (ret < 0)
@@ -61,120 +142,56 @@ void notrace walk_stackframe(struct stackframe *frame,
EXPORT_SYMBOL(walk_stackframe);
#ifdef CONFIG_STACKTRACE
-struct stack_trace_data {
- struct stack_trace *trace;
- unsigned long last_pc;
- unsigned int no_sched_functions;
- unsigned int skip;
-};
-
-static int save_trace(struct stackframe *frame, void *d)
+static void start_stack_trace(struct stackframe *frame, struct task_struct *task,
+ unsigned long fp, unsigned long sp,
+ unsigned long lr, unsigned long pc)
{
- struct stack_trace_data *data = d;
- struct stack_trace *trace = data->trace;
- struct pt_regs *regs;
- unsigned long addr = frame->pc;
-
- if (data->no_sched_functions && in_sched_functions(addr))
- return 0;
- if (data->skip) {
- data->skip--;
- return 0;
- }
-
- trace->entries[trace->nr_entries++] = addr;
-
- if (trace->nr_entries >= trace->max_entries)
- return 1;
-
- /*
- * in_exception_text() is designed to test if the PC is one of
- * the functions which has an exception stack above it, but
- * unfortunately what is in frame->pc is the return LR value,
- * not the saved PC value. So, we need to track the previous
- * frame PC value when doing this.
- */
- addr = data->last_pc;
- data->last_pc = frame->pc;
- if (!in_exception_text(addr))
- return 0;
-
- regs = (struct pt_regs *)frame->sp;
-
- trace->entries[trace->nr_entries++] = regs->ARM_pc;
-
- return trace->nr_entries >= trace->max_entries;
+ frame->fp = fp;
+ frame->sp = sp;
+ frame->lr = lr;
+ frame->pc = pc;
+#ifdef CONFIG_KRETPROBES
+ frame->kr_cur = NULL;
+ frame->tsk = task;
+#endif
+#ifdef CONFIG_UNWINDER_FRAME_POINTER
+ frame->ex_frame = in_entry_text(frame->pc);
+#endif
}
-/* This must be noinline to so that our skip calculation works correctly */
-static noinline void __save_stack_trace(struct task_struct *tsk,
- struct stack_trace *trace, unsigned int nosched)
+void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ struct task_struct *task, struct pt_regs *regs)
{
- struct stack_trace_data data;
struct stackframe frame;
- data.trace = trace;
- data.last_pc = ULONG_MAX;
- data.skip = trace->skip;
- data.no_sched_functions = nosched;
-
- if (tsk != current) {
+ if (regs) {
+ start_stack_trace(&frame, NULL, regs->ARM_fp, regs->ARM_sp,
+ regs->ARM_lr, regs->ARM_pc);
+ } else if (task != current) {
#ifdef CONFIG_SMP
/*
* What guarantees do we have here that 'task' is not
* running on another CPU? For now, ignore it as we
* can't guarantee we won't explode.
*/
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
return;
#else
- frame.fp = thread_saved_fp(tsk);
- frame.sp = thread_saved_sp(tsk);
- frame.lr = 0; /* recovered from the stack */
- frame.pc = thread_saved_pc(tsk);
+ start_stack_trace(&frame, task, thread_saved_fp(task),
+ thread_saved_sp(task), 0,
+ thread_saved_pc(task));
#endif
} else {
- /* We don't want this function nor the caller */
- data.skip += 2;
- frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.sp = current_stack_pointer;
- frame.lr = (unsigned long)__builtin_return_address(0);
- frame.pc = (unsigned long)__save_stack_trace;
+here:
+ start_stack_trace(&frame, task,
+ (unsigned long)__builtin_frame_address(0),
+ current_stack_pointer,
+ (unsigned long)__builtin_return_address(0),
+ (unsigned long)&&here);
+ /* skip this function */
+ if (unwind_frame(&frame))
+ return;
}
- walk_stackframe(&frame, save_trace, &data);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
-}
-
-void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
-{
- struct stack_trace_data data;
- struct stackframe frame;
-
- data.trace = trace;
- data.skip = trace->skip;
- data.no_sched_functions = 0;
-
- frame.fp = regs->ARM_fp;
- frame.sp = regs->ARM_sp;
- frame.lr = regs->ARM_lr;
- frame.pc = regs->ARM_pc;
-
- walk_stackframe(&frame, save_trace, &data);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
-}
-
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
-{
- __save_stack_trace(tsk, trace, 1);
-}
-
-void save_stack_trace(struct stack_trace *trace)
-{
- __save_stack_trace(current, trace, 0);
+ walk_stackframe(&frame, consume_entry, cookie);
}
-EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index ef794c799cb6..58a6441b58c4 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -1,15 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
+#include <linux/pgtable.h>
+#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
+#include <asm/uaccess.h>
extern int __cpu_suspend(unsigned long, int (*)(unsigned long), u32 cpuid);
extern void cpu_resume_mmu(void);
@@ -25,16 +28,34 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
return -EINVAL;
/*
+ * Needed for the MMU disabling/enabling code to be able to run from
+ * TTBR0 addresses.
+ */
+ if (IS_ENABLED(CONFIG_CPU_TTBR0_PAN))
+ uaccess_save_and_enable();
+
+ /*
+ * Function graph tracer state gets inconsistent when the kernel
+ * calls functions that never return (a.k.a. suspend finishers), hence
+ * graph tracing is disabled during their execution.
+ */
+ pause_graph_tracing();
+
+ /*
* Provide a temporary page table with an identity mapping for
* the MMU-enable code, required for resuming. On successful
* resume (indicated by a zero return code), we need to switch
* back to the correct page tables.
*/
ret = __cpu_suspend(arg, fn, __mpidr);
+
+ unpause_graph_tracing();
+
if (ret == 0) {
cpu_switch_mm(mm->pgd, mm);
local_flush_bp_all();
local_flush_tlb_all();
+ check_other_bugs();
}
return ret;
@@ -43,7 +64,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
u32 __mpidr = cpu_logical_map(smp_processor_id());
- return __cpu_suspend(arg, fn, __mpidr);
+ int ret;
+
+ pause_graph_tracing();
+ ret = __cpu_suspend(arg, fn, __mpidr);
+ unpause_graph_tracing();
+
+ return ret;
}
#define idmap_pgd NULL
#endif
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 3bda08bee674..fdce83c95acb 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/kernel/swp_emulate.c
*
* Copyright (C) 2009 ARM Limited
* __user_* functions adapted from include/asm/uaccess.h
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Implements emulation of the SWP/SWPB instructions using load-exclusive and
* store-exclusive for processors that have them disabled (or future ones that
* might not implement them).
@@ -37,6 +34,7 @@
*/
#define __user_swpX_asm(data, addr, res, temp, B) \
__asm__ __volatile__( \
+ ".arch armv7-a\n" \
"0: ldrex"B" %2, [%3]\n" \
"1: strex"B" %0, %1, [%3]\n" \
" cmp %0, #0\n" \
@@ -91,18 +89,6 @@ static int proc_status_show(struct seq_file *m, void *v)
seq_printf(m, "Last process:\t\t%d\n", previous_pid);
return 0;
}
-
-static int proc_status_open(struct inode *inode, struct file *file)
-{
- return single_open(file, proc_status_show, PDE_DATA(inode));
-}
-
-static const struct file_operations proc_status_fops = {
- .open = proc_status_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
#endif
/*
@@ -110,21 +96,20 @@ static const struct file_operations proc_status_fops = {
*/
static void set_segfault(struct pt_regs *regs, unsigned long addr)
{
- siginfo_t info;
+ int si_code;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
if (find_vma(current->mm, addr) == NULL)
- info.si_code = SEGV_MAPERR;
+ si_code = SEGV_MAPERR;
else
- info.si_code = SEGV_ACCERR;
- up_read(&current->mm->mmap_sem);
-
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- info.si_addr = (void *) instruction_pointer(regs);
+ si_code = SEGV_ACCERR;
+ mmap_read_unlock(current->mm);
pr_debug("SWP{B} emulation: access caused memory abort!\n");
- arm_notify_die("Illegal memory access", regs, &info, 0, 0);
+ arm_notify_die("Illegal memory access", regs,
+ SIGSEGV, si_code,
+ (void __user *)instruction_pointer(regs),
+ 0, 0);
abtcounter++;
}
@@ -211,7 +196,7 @@ static int swp_handler(struct pt_regs *regs, unsigned int instr)
destreg, EXTRACT_REG_NUM(instr, RT2_OFFSET), data);
/* Check access in reasonable access range for both SWP and SWPB */
- if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
+ if (!access_ok((void __user *)(address & ~3), 4)) {
pr_debug("SWP{B} emulation: access to %p not allowed!\n",
(void *)address);
res = -EFAULT;
@@ -260,7 +245,8 @@ static int __init swp_emulation_init(void)
return 0;
#ifdef CONFIG_PROC_FS
- if (!proc_create("cpu/swp_emulation", S_IRUGO, NULL, &proc_status_fops))
+ if (!proc_create_single("cpu/swp_emulation", S_IRUGO, NULL,
+ proc_status_show))
return -ENOMEM;
#endif /* CONFIG_PROC_FS */
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
index 3151f5623d0e..0141e9bb02e8 100644
--- a/arch/arm/kernel/sys_arm.c
+++ b/arch/arm/kernel/sys_arm.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/kernel/sys_arm.c
*
* Copyright (C) People who wrote linux/arch/i386/kernel/sys_i386.c
* Copyright (C) 1995, 1996 Russell King.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* This file contains various random system calls that
* have a non-standard calling sequence on the Linux/arm
* platform.
@@ -27,6 +24,7 @@
#include <linux/ipc.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
+#include <asm/syscalls.h>
/*
* Since loff_t is a 64 bit type we avoid a lot of ABI hassle
@@ -35,5 +33,5 @@
asmlinkage long sys_arm_fadvise64_64(int fd, int advice,
loff_t offset, loff_t len)
{
- return sys_fadvise64_64(fd, offset, len, advice);
+ return ksys_fadvise64_64(fd, offset, len, advice);
}
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index b9786f491873..2944721e82a2 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/arm/kernel/sys_oabi-compat.c
*
@@ -7,12 +8,10 @@
* Author: Nicolas Pitre
* Created: Oct 7, 2005
* Copyright: MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
+#include <asm/syscalls.h>
+
/*
* The legacy ABI and the new ARM EABI have different rules making some
* syscalls incompatible especially with structure arguments.
@@ -76,6 +75,7 @@
#include <linux/syscalls.h>
#include <linux/errno.h>
#include <linux/fs.h>
+#include <linux/filelock.h>
#include <linux/cred.h>
#include <linux/fcntl.h>
#include <linux/eventpoll.h>
@@ -83,9 +83,12 @@
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/ipc.h>
+#include <linux/ipc_namespace.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
+#include <asm/syscall.h>
+
struct oldabi_stat64 {
unsigned long long st_dev;
unsigned int __pad1;
@@ -194,113 +197,131 @@ struct oabi_flock64 {
pid_t l_pid;
} __attribute__ ((packed,aligned(4)));
-static long do_locks(unsigned int fd, unsigned int cmd,
- unsigned long arg)
+static int get_oabi_flock(struct flock64 *kernel, struct oabi_flock64 __user *arg)
{
- struct flock64 kernel;
struct oabi_flock64 user;
- mm_segment_t fs;
- long ret;
if (copy_from_user(&user, (struct oabi_flock64 __user *)arg,
sizeof(user)))
return -EFAULT;
- kernel.l_type = user.l_type;
- kernel.l_whence = user.l_whence;
- kernel.l_start = user.l_start;
- kernel.l_len = user.l_len;
- kernel.l_pid = user.l_pid;
-
- fs = get_fs();
- set_fs(KERNEL_DS);
- ret = sys_fcntl64(fd, cmd, (unsigned long)&kernel);
- set_fs(fs);
-
- if (!ret && (cmd == F_GETLK64 || cmd == F_OFD_GETLK)) {
- user.l_type = kernel.l_type;
- user.l_whence = kernel.l_whence;
- user.l_start = kernel.l_start;
- user.l_len = kernel.l_len;
- user.l_pid = kernel.l_pid;
- if (copy_to_user((struct oabi_flock64 __user *)arg,
- &user, sizeof(user)))
- ret = -EFAULT;
- }
- return ret;
+
+ kernel->l_type = user.l_type;
+ kernel->l_whence = user.l_whence;
+ kernel->l_start = user.l_start;
+ kernel->l_len = user.l_len;
+ kernel->l_pid = user.l_pid;
+
+ return 0;
+}
+
+static int put_oabi_flock(struct flock64 *kernel, struct oabi_flock64 __user *arg)
+{
+ struct oabi_flock64 user;
+
+ user.l_type = kernel->l_type;
+ user.l_whence = kernel->l_whence;
+ user.l_start = kernel->l_start;
+ user.l_len = kernel->l_len;
+ user.l_pid = kernel->l_pid;
+
+ if (copy_to_user((struct oabi_flock64 __user *)arg,
+ &user, sizeof(user)))
+ return -EFAULT;
+
+ return 0;
}
asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd,
unsigned long arg)
{
+ void __user *argp = (void __user *)arg;
+ CLASS(fd_raw, f)(fd);
+ struct flock64 flock;
+ long err;
+
+ if (fd_empty(f))
+ return -EBADF;
+
switch (cmd) {
- case F_OFD_GETLK:
- case F_OFD_SETLK:
- case F_OFD_SETLKW:
case F_GETLK64:
+ case F_OFD_GETLK:
+ err = security_file_fcntl(fd_file(f), cmd, arg);
+ if (err)
+ break;
+ err = get_oabi_flock(&flock, argp);
+ if (err)
+ break;
+ err = fcntl_getlk64(fd_file(f), cmd, &flock);
+ if (!err)
+ err = put_oabi_flock(&flock, argp);
+ break;
case F_SETLK64:
case F_SETLKW64:
- return do_locks(fd, cmd, arg);
-
+ case F_OFD_SETLK:
+ case F_OFD_SETLKW:
+ err = security_file_fcntl(fd_file(f), cmd, arg);
+ if (err)
+ break;
+ err = get_oabi_flock(&flock, argp);
+ if (err)
+ break;
+ err = fcntl_setlk64(fd, fd_file(f), cmd, &flock);
+ break;
default:
- return sys_fcntl64(fd, cmd, arg);
+ err = sys_fcntl64(fd, cmd, arg);
+ break;
}
+ return err;
}
struct oabi_epoll_event {
- __u32 events;
+ __poll_t events;
__u64 data;
} __attribute__ ((packed,aligned(4)));
+#ifdef CONFIG_EPOLL
asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd,
struct oabi_epoll_event __user *event)
{
struct oabi_epoll_event user;
struct epoll_event kernel;
- mm_segment_t fs;
- long ret;
- if (op == EPOLL_CTL_DEL)
- return sys_epoll_ctl(epfd, op, fd, NULL);
- if (copy_from_user(&user, event, sizeof(user)))
+ if (ep_op_has_event(op) &&
+ copy_from_user(&user, event, sizeof(user)))
return -EFAULT;
+
kernel.events = user.events;
kernel.data = user.data;
- fs = get_fs();
- set_fs(KERNEL_DS);
- ret = sys_epoll_ctl(epfd, op, fd, &kernel);
- set_fs(fs);
- return ret;
+
+ return do_epoll_ctl(epfd, op, fd, &kernel, false);
+}
+#else
+asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd,
+ struct oabi_epoll_event __user *event)
+{
+ return -EINVAL;
}
+#endif
-asmlinkage long sys_oabi_epoll_wait(int epfd,
- struct oabi_epoll_event __user *events,
- int maxevents, int timeout)
+struct epoll_event __user *
+epoll_put_uevent(__poll_t revents, __u64 data,
+ struct epoll_event __user *uevent)
{
- struct epoll_event *kbuf;
- mm_segment_t fs;
- long ret, err, i;
+ if (in_oabi_syscall()) {
+ struct oabi_epoll_event __user *oevent = (void __user *)uevent;
- if (maxevents <= 0 ||
- maxevents > (INT_MAX/sizeof(*kbuf)) ||
- maxevents > (INT_MAX/sizeof(*events)))
- return -EINVAL;
- if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents))
- return -EFAULT;
- kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL);
- if (!kbuf)
- return -ENOMEM;
- fs = get_fs();
- set_fs(KERNEL_DS);
- ret = sys_epoll_wait(epfd, kbuf, maxevents, timeout);
- set_fs(fs);
- err = 0;
- for (i = 0; i < ret; i++) {
- __put_user_error(kbuf[i].events, &events->events, err);
- __put_user_error(kbuf[i].data, &events->data, err);
- events++;
+ if (__put_user(revents, &oevent->events) ||
+ __put_user(data, &oevent->data))
+ return NULL;
+
+ return (void __user *)(oevent+1);
}
- kfree(kbuf);
- return err ? -EFAULT : ret;
+
+ if (__put_user(revents, &uevent->events) ||
+ __put_user(data, &uevent->data))
+ return NULL;
+
+ return uevent+1;
}
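The reason the events cannot be copied in bulk is purely layout: OABI packs the
structure to 4-byte alignment while EABI naturally aligns the __u64 member.
Sketch of the difference (offsets follow from the attributes):

    struct oabi_epoll_event { __u32 events; __u64 data; }
        __attribute__((packed, aligned(4)));   /* size 12, data at offset 4 */
    /* native EABI struct epoll_event:            size 16, data at offset 8 */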
struct oabi_sembuf {
@@ -310,44 +331,52 @@ struct oabi_sembuf {
unsigned short __pad;
};
+#define sc_semopm sem_ctls[2]
+
+#ifdef CONFIG_SYSVIPC
asmlinkage long sys_oabi_semtimedop(int semid,
struct oabi_sembuf __user *tsops,
unsigned nsops,
- const struct timespec __user *timeout)
+ const struct old_timespec32 __user *timeout)
{
+ struct ipc_namespace *ns;
struct sembuf *sops;
- struct timespec local_timeout;
long err;
int i;
+ ns = current->nsproxy->ipc_ns;
+ if (nsops > ns->sc_semopm)
+ return -E2BIG;
if (nsops < 1 || nsops > SEMOPM)
return -EINVAL;
- if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops))
- return -EFAULT;
- sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
+ sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
if (!sops)
return -ENOMEM;
err = 0;
for (i = 0; i < nsops; i++) {
- __get_user_error(sops[i].sem_num, &tsops->sem_num, err);
- __get_user_error(sops[i].sem_op, &tsops->sem_op, err);
- __get_user_error(sops[i].sem_flg, &tsops->sem_flg, err);
+ struct oabi_sembuf osb;
+ err |= copy_from_user(&osb, tsops, sizeof(osb));
+ sops[i].sem_num = osb.sem_num;
+ sops[i].sem_op = osb.sem_op;
+ sops[i].sem_flg = osb.sem_flg;
tsops++;
}
- if (timeout) {
- /* copy this as well before changing domain protection */
- err |= copy_from_user(&local_timeout, timeout, sizeof(*timeout));
- timeout = &local_timeout;
- }
if (err) {
err = -EFAULT;
- } else {
- mm_segment_t fs = get_fs();
- set_fs(KERNEL_DS);
- err = sys_semtimedop(semid, sops, nsops, timeout);
- set_fs(fs);
+ goto out;
+ }
+
+ if (timeout) {
+ struct timespec64 ts;
+ err = get_old_timespec32(&ts, timeout);
+ if (err)
+ goto out;
+ err = __do_semtimedop(semid, sops, nsops, &ts, ns);
+ goto out;
}
- kfree(sops);
+ err = __do_semtimedop(semid, sops, nsops, NULL, ns);
+out:
+ kvfree(sops);
return err;
}
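A similar mismatch motivates the element-by-element copy here: the legacy ABI
pads struct sembuf to a multiple of 4 bytes (hence the explicit __pad in
oabi_sembuf above), while the EABI layout is 6 bytes, so an array of OABI
sembufs cannot simply be reinterpreted in place:

    /* EABI:  sizeof(struct sembuf)      == 6 */
    /* OABI:  sizeof(struct oabi_sembuf) == 8, due to the trailing __pad */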
@@ -369,11 +398,32 @@ asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third,
return sys_oabi_semtimedop(first,
(struct oabi_sembuf __user *)ptr,
second,
- (const struct timespec __user *)fifth);
+ (const struct old_timespec32 __user *)fifth);
default:
return sys_ipc(call, first, second, third, ptr, fifth);
}
}
+#else
+asmlinkage long sys_oabi_semtimedop(int semid,
+ struct oabi_sembuf __user *tsops,
+ unsigned nsops,
+ const struct old_timespec32 __user *timeout)
+{
+ return -ENOSYS;
+}
+
+asmlinkage long sys_oabi_semop(int semid, struct oabi_sembuf __user *tsops,
+ unsigned nsops)
+{
+ return -ENOSYS;
+}
+
+asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third,
+ void __user *ptr, long fifth)
+{
+ return -ENOSYS;
+}
+#endif
asmlinkage long sys_oabi_bind(int fd, struct sockaddr __user *addr, int addrlen)
{
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
index b10e1360762e..f59927bcfbce 100644
--- a/arch/arm/kernel/tcm.c
+++ b/arch/arm/kernel/tcm.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2008-2009 ST-Ericsson AB
- * License terms: GNU General Public License (GPL) version 2
* TCM memory handling for ARM systems
*
* Author: Linus Walleij <linus.walleij@stericsson.com>
@@ -15,9 +15,10 @@
#include <linux/string.h> /* memcpy */
#include <asm/cputype.h>
#include <asm/mach/map.h>
-#include <asm/memory.h>
+#include <asm/page.h>
#include <asm/system_info.h>
#include <asm/traps.h>
+#include <asm/tcm.h>
#define TCMTR_FORMAT_MASK 0xe0000000U
@@ -30,8 +31,8 @@ extern char __itcm_start, __sitcm_text, __eitcm_text;
extern char __dtcm_start, __sdtcm_data, __edtcm_data;
/* These will be increased as we run */
-u32 dtcm_end = DTCM_OFFSET;
-u32 itcm_end = ITCM_OFFSET;
+static u32 dtcm_end = DTCM_OFFSET;
+static u32 itcm_end = ITCM_OFFSET;
/*
* TCM memory resources
diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c
index 8ff8dbfbe9fb..d832eb9e6235 100644
--- a/arch/arm/kernel/thumbee.c
+++ b/arch/arm/kernel/thumbee.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/arm/kernel/thumbee.c
*
* Copyright (C) 2008 ARM Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 629f8e9981f1..b3836c94dc74 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -1,17 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/kernel/time.c
*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
* Modifications for ARM (C) 1994-2001 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* This file contains the ARM-specific time handling details:
* reading the RTC at bootup, etc...
*/
-#include <linux/clk-provider.h>
+#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/errno.h>
#include <linux/export.h>
@@ -19,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
+#include <linux/of_clk.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/sched_clock.h>
@@ -62,20 +60,6 @@ unsigned long profile_pc(struct pt_regs *regs)
EXPORT_SYMBOL(profile_pc);
#endif
-#ifndef CONFIG_GENERIC_CLOCKEVENTS
-/*
- * Kernel system timer support.
- */
-void timer_tick(void)
-{
- profile_tick(CPU_PROFILING);
- xtime_update(1);
-#ifndef CONFIG_SMP
- update_process_times(user_mode(get_irq_regs()));
-#endif
-}
-#endif
-
static void dummy_clock_access(struct timespec64 *ts)
{
ts->tv_sec = 0;
@@ -83,29 +67,18 @@ static void dummy_clock_access(struct timespec64 *ts)
}
static clock_access_fn __read_persistent_clock = dummy_clock_access;
-static clock_access_fn __read_boot_clock = dummy_clock_access;;
void read_persistent_clock64(struct timespec64 *ts)
{
__read_persistent_clock(ts);
}
-void read_boot_clock64(struct timespec64 *ts)
-{
- __read_boot_clock(ts);
-}
-
-int __init register_persistent_clock(clock_access_fn read_boot,
- clock_access_fn read_persistent)
+int __init register_persistent_clock(clock_access_fn read_persistent)
{
/* Only allow the clockaccess functions to be registered once */
- if (__read_persistent_clock == dummy_clock_access &&
- __read_boot_clock == dummy_clock_access) {
- if (read_boot)
- __read_boot_clock = read_boot;
+ if (__read_persistent_clock == dummy_clock_access) {
if (read_persistent)
__read_persistent_clock = read_persistent;
-
return 0;
}
@@ -121,5 +94,6 @@ void __init time_init(void)
of_clk_init(NULL);
#endif
timer_probe();
+ tick_setup_hrtimer_broadcast();
}
}
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index bf949a763dbe..2336ee2aa44a 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -42,7 +42,7 @@
* can take this difference into account during load balance. A per cpu
* structure is preferred because each CPU updates its own cpu_capacity field
* during the load balance except for idle cores. One idle core is selected
- * to run the rebalance_domains for all idle cores and the cpu_capacity can be
+ * to run the sched_balance_domains for all idle cores and the cpu_capacity can be
* updated during this sequence.
*/
@@ -94,14 +94,8 @@ static void __init parse_dt_topology(void)
__cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
GFP_NOWAIT);
- cn = of_find_node_by_path("/cpus");
- if (!cn) {
- pr_err("No CPU information found in DT\n");
- return;
- }
-
for_each_possible_cpu(cpu) {
- const u32 *rate;
+ const __be32 *rate;
int len;
/* too early to use cpu->of_node */
@@ -127,8 +121,7 @@ static void __init parse_dt_topology(void)
rate = of_get_property(cn, "clock-frequency", &len);
if (!rate || len != 4) {
- pr_err("%s missing clock-frequency property\n",
- cn->full_name);
+ pr_err("%pOF missing clock-frequency property\n", cn);
continue;
}
@@ -176,7 +169,7 @@ static void update_cpu_capacity(unsigned int cpu)
topology_set_cpu_scale(cpu, cpu_capacity(cpu) / middle_capacity);
pr_info("CPU%u: update cpu_capacity %lu\n",
- cpu, topology_get_cpu_scale(NULL, cpu));
+ cpu, topology_get_cpu_scale(cpu));
}
#else
@@ -184,52 +177,6 @@ static inline void parse_dt_topology(void) {}
static inline void update_cpu_capacity(unsigned int cpuid) {}
#endif
- /*
- * cpu topology table
- */
-struct cputopo_arm cpu_topology[NR_CPUS];
-EXPORT_SYMBOL_GPL(cpu_topology);
-
-const struct cpumask *cpu_coregroup_mask(int cpu)
-{
- return &cpu_topology[cpu].core_sibling;
-}
-
-/*
- * The current assumption is that we can power gate each core independently.
- * This will be superseded by DT binding once available.
- */
-const struct cpumask *cpu_corepower_mask(int cpu)
-{
- return &cpu_topology[cpu].thread_sibling;
-}
-
-static void update_siblings_masks(unsigned int cpuid)
-{
- struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
- int cpu;
-
- /* update core and thread sibling masks */
- for_each_possible_cpu(cpu) {
- cpu_topo = &cpu_topology[cpu];
-
- if (cpuid_topo->socket_id != cpu_topo->socket_id)
- continue;
-
- cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
- if (cpu != cpuid)
- cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
-
- if (cpuid_topo->core_id != cpu_topo->core_id)
- continue;
-
- cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
- if (cpu != cpuid)
- cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
- }
- smp_wmb();
-}
-
/*
* store_cpu_topology is called at boot when only one cpu is running
* and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
@@ -237,12 +184,11 @@ static void update_siblings_masks(unsigned int cpuid)
*/
void store_cpu_topology(unsigned int cpuid)
{
- struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
+ struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
unsigned int mpidr;
- /* If the cpu topology has been already set, just return */
- if (cpuid_topo->core_id != -1)
- return;
+ if (cpuid_topo->package_id != -1)
+ goto topology_populated;
mpidr = read_cpuid_mpidr();
@@ -257,12 +203,12 @@ void store_cpu_topology(unsigned int cpuid)
/* core performance interdependency */
cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
- cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+ cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
} else {
/* largely independent cores */
cpuid_topo->thread_id = -1;
cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
}
} else {
/*
@@ -272,55 +218,28 @@ void store_cpu_topology(unsigned int cpuid)
*/
cpuid_topo->thread_id = -1;
cpuid_topo->core_id = 0;
- cpuid_topo->socket_id = -1;
+ cpuid_topo->package_id = -1;
}
- update_siblings_masks(cpuid);
-
update_cpu_capacity(cpuid);
pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
cpuid, cpu_topology[cpuid].thread_id,
cpu_topology[cpuid].core_id,
- cpu_topology[cpuid].socket_id, mpidr);
-}
+ cpu_topology[cpuid].package_id, mpidr);
-static inline int cpu_corepower_flags(void)
-{
- return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
+topology_populated:
+ update_siblings_masks(cpuid);
}
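
For reference: the MPIDR affinity extraction used above is essentially
one byte per level. The definitions below are a sketch of the usual
asm/cputype.h macros, shown here only for illustration:

	#define MPIDR_LEVEL_BITS	8
	#define MPIDR_LEVEL_MASK	((1 << MPIDR_LEVEL_BITS) - 1)
	#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
		(((mpidr) >> (MPIDR_LEVEL_BITS * (level))) & MPIDR_LEVEL_MASK)

For an MT system reporting MPIDR 0x81000102, this yields thread_id 0x02
(level 0), core_id 0x01 (level 1) and package_id 0x00 (level 2).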
-static struct sched_domain_topology_level arm_topology[] = {
-#ifdef CONFIG_SCHED_MC
- { cpu_corepower_mask, cpu_corepower_flags, SD_INIT_NAME(GMC) },
- { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
-#endif
- { cpu_cpu_mask, SD_INIT_NAME(DIE) },
- { NULL, },
-};
-
/*
* init_cpu_topology is called at boot when only one cpu is running
* which prevent simultaneous write access to cpu_topology array
*/
void __init init_cpu_topology(void)
{
- unsigned int cpu;
-
- /* init core mask and capacity */
- for_each_possible_cpu(cpu) {
- struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);
-
- cpu_topo->thread_id = -1;
- cpu_topo->core_id = -1;
- cpu_topo->socket_id = -1;
- cpumask_clear(&cpu_topo->core_sibling);
- cpumask_clear(&cpu_topo->thread_sibling);
- }
+ reset_cpu_topology();
smp_wmb();
parse_dt_topology();
-
- /* Set scheduler topology descriptor */
- set_sched_topology(arm_topology);
}
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 948c648fea00..afbd2ebe5c39 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/kernel/traps.c
*
* Copyright (C) 1995-2009 Russell King
* Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* 'traps.c' handles hardware exceptions after we have saved some state in
* 'linux/arch/arm/lib/traps.S'. Mostly a debugging aid, but will probably
* kill the offending process.
@@ -19,6 +16,7 @@
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
+#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/bug.h>
@@ -28,15 +26,18 @@
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/irq.h>
+#include <linux/vmalloc.h>
#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/exception.h>
+#include <asm/spectre.h>
#include <asm/unistd.h>
#include <asm/traps.h>
#include <asm/ptrace.h>
#include <asm/unwind.h>
#include <asm/tls.h>
+#include <asm/stacktrace.h>
#include <asm/system_misc.h>
#include <asm/opcodes.h>
@@ -62,21 +63,39 @@ static int __init user_debug_setup(char *str)
__setup("user_debug=", user_debug_setup);
#endif
-static void dump_mem(const char *, const char *, unsigned long, unsigned long);
+void dump_backtrace_entry(unsigned long where, unsigned long from,
+ unsigned long frame, const char *loglvl)
+{
+ unsigned long end = frame + 4 + sizeof(struct pt_regs);
+
+ if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER) &&
+ IS_ENABLED(CONFIG_CC_IS_GCC) &&
+ end > ALIGN(frame, THREAD_SIZE)) {
+ /*
+ * If we are walking past the end of the stack, it may be due
+ * to the fact that we are on an IRQ or overflow stack. In this
+ * case, we can load the address of the other stack from the
+ * frame record.
+ */
+ frame = ((unsigned long *)frame)[-2] - 4;
+ end = frame + 4 + sizeof(struct pt_regs);
+ }
-void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
-{
-#ifdef CONFIG_KALLSYMS
- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
+#ifndef CONFIG_KALLSYMS
+ printk("%sFunction entered at [<%08lx>] from [<%08lx>]\n",
+ loglvl, where, from);
+#elif defined CONFIG_BACKTRACE_VERBOSE
+ printk("%s[<%08lx>] (%ps) from [<%08lx>] (%pS)\n",
+ loglvl, where, (void *)where, from, (void *)from);
#else
- printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
+ printk("%s %ps from %pS\n", loglvl, (void *)where, (void *)from);
#endif
- if (in_exception_text(where))
- dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
+ if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE))
+ dump_mem(loglvl, "Exception stack", frame + 4, end);
}
-void dump_backtrace_stm(u32 *stack, u32 instruction)
+void dump_backtrace_stm(u32 *stack, u32 instruction, const char *loglvl)
{
char str[80], *p;
unsigned int x;
@@ -88,12 +107,12 @@ void dump_backtrace_stm(u32 *stack, u32 instruction)
if (++x == 6) {
x = 0;
p = str;
- printk("%s\n", str);
+ printk("%s%s\n", loglvl, str);
}
}
}
if (p != str)
- printk("%s\n", str);
+ printk("%s%s\n", loglvl, str);
}
#ifndef CONFIG_ARM_UNWIND
@@ -105,7 +124,8 @@ void dump_backtrace_stm(u32 *stack, u32 instruction)
static int verify_stack(unsigned long sp)
{
if (sp < PAGE_OFFSET ||
- (sp > (unsigned long)high_memory && high_memory != NULL))
+ (!IS_ENABLED(CONFIG_VMAP_STACK) &&
+ sp > (unsigned long)high_memory && high_memory != NULL))
return -EFAULT;
return 0;
@@ -115,21 +135,12 @@ static int verify_stack(unsigned long sp)
/*
* Dump out the contents of some memory nicely...
*/
-static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
- unsigned long top)
+void dump_mem(const char *lvl, const char *str, unsigned long bottom,
+ unsigned long top)
{
unsigned long first;
- mm_segment_t fs;
int i;
- /*
- * We need to switch to kernel mode so that we can use __get_user
- * to safely read from kernel space. Note that we now dump the
- * code first, just in case the backtrace kills us.
- */
- fs = get_fs();
- set_fs(KERNEL_DS);
-
printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top);
for (first = bottom & ~31; first < top; first += 32) {
@@ -142,7 +153,7 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
if (p >= bottom && p < top) {
unsigned long val;
- if (__get_user(val, (unsigned long *)p) == 0)
+ if (!get_kernel_nofault(val, (unsigned long *)p))
sprintf(str + i * 9, " %08lx", val);
else
sprintf(str + i * 9, " ????????");
@@ -150,8 +161,6 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
}
printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
}
-
- set_fs(fs);
}
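
A minimal usage sketch of get_kernel_nofault(), which is what lets
dump_mem() drop the set_fs(KERNEL_DS) dance above; peek_kernel_word() is
a hypothetical helper, not part of the patch:

	static unsigned long peek_kernel_word(const unsigned long *p)
	{
		unsigned long val;

		/* returns non-zero if the address faults, instead of oopsing */
		if (get_kernel_nofault(val, p))
			return 0;	/* caller prints "????????" */

		return val;
	}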
static void dump_instr(const char *lvl, struct pt_regs *regs)
@@ -159,25 +168,34 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
unsigned long addr = instruction_pointer(regs);
const int thumb = thumb_mode(regs);
const int width = thumb ? 4 : 8;
- mm_segment_t fs;
char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
int i;
/*
- * We need to switch to kernel mode so that we can use __get_user
- * to safely read from kernel space. Note that we now dump the
- * code first, just in case the backtrace kills us.
+ * Note that we now dump the code first, just in case the backtrace
+ * kills us.
*/
- fs = get_fs();
- set_fs(KERNEL_DS);
for (i = -4; i < 1 + !!thumb; i++) {
unsigned int val, bad;
- if (thumb)
- bad = __get_user(val, &((u16 *)addr)[i]);
- else
- bad = __get_user(val, &((u32 *)addr)[i]);
+ if (thumb) {
+ u16 tmp;
+
+ if (user_mode(regs))
+ bad = get_user(tmp, &((u16 __user *)addr)[i]);
+ else
+ bad = get_kernel_nofault(tmp, &((u16 *)addr)[i]);
+
+ val = __mem_to_opcode_thumb16(tmp);
+ } else {
+ if (user_mode(regs))
+ bad = get_user(val, &((u32 __user *)addr)[i]);
+ else
+ bad = get_kernel_nofault(val, &((u32 *)addr)[i]);
+
+ val = __mem_to_opcode_arm(val);
+ }
if (!bad)
p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
@@ -188,22 +206,22 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
}
}
printk("%sCode: %s\n", lvl, str);
-
- set_fs(fs);
}
#ifdef CONFIG_ARM_UNWIND
-static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl)
{
- unwind_backtrace(regs, tsk);
+ unwind_backtrace(regs, tsk, loglvl);
}
#else
-static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl)
{
unsigned int fp, mode;
int ok = 1;
- printk("Backtrace: ");
+ printk("%sCall trace: ", loglvl);
if (!tsk)
tsk = current;
@@ -230,21 +248,16 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
pr_cont("\n");
if (ok)
- c_backtrace(fp, mode);
+ c_backtrace(fp, mode, loglvl);
}
#endif
-void show_stack(struct task_struct *tsk, unsigned long *sp)
+void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
- dump_backtrace(NULL, tsk);
+ dump_backtrace(NULL, tsk, loglvl);
barrier();
}
-#ifdef CONFIG_PREEMPT
-#define S_PREEMPT " PREEMPT"
-#else
-#define S_PREEMPT ""
-#endif
#ifdef CONFIG_SMP
#define S_SMP " SMP"
#else
@@ -262,8 +275,8 @@ static int __die(const char *str, int err, struct pt_regs *regs)
static int die_counter;
int ret;
- pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP S_ISA "\n",
- str, err, ++die_counter);
+ pr_emerg("Internal error: %s: %x [#%d]" S_SMP S_ISA "\n",
+ str, err, ++die_counter);
/* trap and error numbers are mostly meaningless on ARM */
ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
@@ -272,13 +285,15 @@ static int __die(const char *str, int err, struct pt_regs *regs)
print_modules();
__show_regs(regs);
+ __show_regs_alloc_free(regs);
pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk));
if (!user_mode(regs) || in_interrupt()) {
dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
- THREAD_SIZE + (unsigned long)task_stack_page(tsk));
- dump_backtrace(regs, tsk);
+ ALIGN(regs->ARM_sp - THREAD_SIZE, THREAD_ALIGN)
+ + THREAD_SIZE);
+ dump_backtrace(regs, tsk, KERN_EMERG);
dump_instr(KERN_EMERG, regs);
}
@@ -332,7 +347,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
if (panic_on_oops)
panic("Fatal exception");
if (signr)
- do_exit(signr);
+ make_task_dead(signr);
}
/*
@@ -356,13 +371,14 @@ void die(const char *str, struct pt_regs *regs, int err)
}
void arm_notify_die(const char *str, struct pt_regs *regs,
- struct siginfo *info, unsigned long err, unsigned long trap)
+ int signo, int si_code, void __user *addr,
+ unsigned long err, unsigned long trap)
{
if (user_mode(regs)) {
current->thread.error_code = err;
current->thread.trap_no = trap;
- force_sig_info(info->si_signo, info, current);
+ force_sig_fault(signo, si_code, addr);
} else {
die(str, regs, err);
}
@@ -380,7 +396,7 @@ int is_valid_bugaddr(unsigned long pc)
u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE);
#endif
- if (probe_kernel_address((unsigned *)pc, bkpt))
+ if (get_kernel_nofault(bkpt, (void *)pc))
return 0;
return bkpt == insn;
@@ -409,7 +425,8 @@ void unregister_undef_hook(struct undef_hook *hook)
raw_spin_unlock_irqrestore(&undef_lock, flags);
}
-static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
+static nokprobe_inline
+int call_undef_hook(struct pt_regs *regs, unsigned int instr)
{
struct undef_hook *hook;
unsigned long flags;
@@ -425,10 +442,9 @@ static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
return fn ? fn(regs, instr) : 1;
}
-asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
+asmlinkage void do_undefinstr(struct pt_regs *regs)
{
unsigned int instr;
- siginfo_t info;
void __user *pc;
pc = (void __user *)instruction_pointer(regs);
@@ -468,20 +484,16 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
die_sig:
#ifdef CONFIG_DEBUG_USER
if (user_debug & UDBG_UNDEFINED) {
- pr_info("%s (%d): undefined instruction: pc=%p\n",
+ pr_info("%s (%d): undefined instruction: pc=%px\n",
current->comm, task_pid_nr(current), pc);
__show_regs(regs);
dump_instr(KERN_INFO, regs);
}
#endif
-
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_ILLOPC;
- info.si_addr = pc;
-
- arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
+ arm_notify_die("Oops - undefined instruction", regs,
+ SIGILL, ILL_ILLOPC, pc, 0, 6);
}
+NOKPROBE_SYMBOL(do_undefinstr)
/*
* Handle FIQ similarly to NMI on x86 systems.
@@ -527,8 +539,6 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason)
static int bad_syscall(int n, struct pt_regs *regs)
{
- siginfo_t info;
-
if ((current->personality & PER_MASK) != PER_LINUX) {
send_sig(SIGSEGV, current, 1);
return regs->ARM_r0;
@@ -542,13 +552,10 @@ static int bad_syscall(int n, struct pt_regs *regs)
}
#endif
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_ILLTRP;
- info.si_addr = (void __user *)instruction_pointer(regs) -
- (thumb_mode(regs) ? 2 : 4);
-
- arm_notify_die("Oops - bad syscall", regs, &info, n, 0);
+ arm_notify_die("Oops - bad syscall", regs, SIGILL, ILL_ILLTRP,
+ (void __user *)instruction_pointer(regs) -
+ (thumb_mode(regs) ? 2 : 4),
+ n, 0);
return regs->ARM_r0;
}
@@ -556,6 +563,7 @@ static int bad_syscall(int n, struct pt_regs *regs)
static inline int
__do_cache_op(unsigned long start, unsigned long end)
{
+ unsigned int ua_flags;
int ret;
do {
@@ -564,7 +572,9 @@ __do_cache_op(unsigned long start, unsigned long end)
if (fatal_signal_pending(current))
return 0;
- ret = flush_cache_user_range(start, start + chunk);
+ ua_flags = uaccess_save_and_enable();
+ ret = flush_icache_user_range(start, start + chunk);
+ uaccess_restore(ua_flags);
if (ret)
return ret;
@@ -581,7 +591,7 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
if (end < start || flags)
return -EINVAL;
- if (!access_ok(VERIFY_READ, start, end - start))
+ if (!access_ok((void __user *)start, end - start))
return -EFAULT;
return __do_cache_op(start, end);
@@ -594,24 +604,18 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
asmlinkage int arm_syscall(int no, struct pt_regs *regs)
{
- siginfo_t info;
-
if ((no >> 16) != (__ARM_NR_BASE>> 16))
return bad_syscall(no, regs);
switch (no & 0xffff) {
case 0: /* branch through 0 */
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- info.si_code = SEGV_MAPERR;
- info.si_addr = NULL;
-
- arm_notify_die("branch through zero", regs, &info, 0, 0);
+ arm_notify_die("branch through zero", regs,
+ SIGSEGV, SEGV_MAPERR, NULL, 0, 0);
return 0;
case NR(breakpoint): /* SWI BREAK_POINT */
regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
- ptrace_break(current, regs);
+ ptrace_break(regs);
return regs->ARM_r0;
/*
@@ -647,6 +651,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
set_tls(regs->ARM_r0);
return 0;
+ case NR(get_tls):
+ return current_thread_info()->tp_value[0];
+
default:
/* Calls 9f00xx..9f07ff are defined to return -ENOSYS
if not implemented, rather than raising SIGILL. This
@@ -664,20 +671,17 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
if (user_debug & UDBG_SYSCALL) {
pr_err("[%d] %s: arm syscall %d\n",
task_pid_nr(current), current->comm, no);
- dump_instr("", regs);
+ dump_instr(KERN_ERR, regs);
if (user_mode(regs)) {
__show_regs(regs);
- c_backtrace(frame_pointer(regs), processor_mode(regs));
+ c_backtrace(frame_pointer(regs), processor_mode(regs), KERN_ERR);
}
}
#endif
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_ILLTRP;
- info.si_addr = (void __user *)instruction_pointer(regs) -
- (thumb_mode(regs) ? 2 : 4);
-
- arm_notify_die("Oops - bad syscall(2)", regs, &info, no, 0);
+ arm_notify_die("Oops - bad syscall(2)", regs, SIGILL, ILL_ILLTRP,
+ (void __user *)instruction_pointer(regs) -
+ (thumb_mode(regs) ? 2 : 4),
+ no, 0);
return 0;
}
@@ -727,23 +731,19 @@ asmlinkage void
baddataabort(int code, unsigned long instr, struct pt_regs *regs)
{
unsigned long addr = instruction_pointer(regs);
- siginfo_t info;
#ifdef CONFIG_DEBUG_USER
if (user_debug & UDBG_BADABORT) {
+ pr_err("8<--- cut here ---\n");
pr_err("[%d] %s: bad data abort: code %d instr 0x%08lx\n",
task_pid_nr(current), current->comm, code, instr);
dump_instr(KERN_ERR, regs);
- show_pte(current->mm, addr);
+ show_pte(KERN_ERR, current->mm, addr);
}
#endif
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_ILLOPC;
- info.si_addr = (void __user *)addr;
-
- arm_notify_die("unknown data abort code", regs, &info, instr, 0);
+ arm_notify_die("unknown data abort code", regs,
+ SIGILL, ILL_ILLOPC, (void __user *)addr, instr, 0);
}
void __readwrite_bug(const char *fn)
@@ -753,6 +753,7 @@ void __readwrite_bug(const char *fn)
}
EXPORT_SYMBOL(__readwrite_bug);
+#ifdef CONFIG_MMU
void __pte_error(const char *file, int line, pte_t pte)
{
pr_err("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte));
@@ -767,6 +768,7 @@ void __pgd_error(const char *file, int line, pgd_t pgd)
{
pr_err("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd));
}
+#endif
asmlinkage void __div0(void)
{
@@ -782,12 +784,6 @@ void abort(void)
/* if that doesn't kill us, halt */
panic("Oops failed to kill thread");
}
-EXPORT_SYMBOL(abort);
-
-void __init trap_init(void)
-{
- return;
-}
#ifdef CONFIG_KUSER_HELPERS
static void __init kuser_init(void *vectors)
@@ -810,10 +806,59 @@ static inline void __init kuser_init(void *vectors)
}
#endif
+#ifndef CONFIG_CPU_V7M
+static void copy_from_lma(void *vma, void *lma_start, void *lma_end)
+{
+ memcpy(vma, lma_start, lma_end - lma_start);
+}
+
+static void flush_vectors(void *vma, size_t offset, size_t size)
+{
+ unsigned long start = (unsigned long)vma + offset;
+ unsigned long end = start + size;
+
+ flush_icache_range(start, end);
+}
+
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+int spectre_bhb_update_vectors(unsigned int method)
+{
+ extern char __vectors_bhb_bpiall_start[], __vectors_bhb_bpiall_end[];
+ extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[];
+ void *vec_start, *vec_end;
+
+ if (system_state >= SYSTEM_FREEING_INITMEM) {
+ pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n",
+ smp_processor_id());
+ return SPECTRE_VULNERABLE;
+ }
+
+ switch (method) {
+ case SPECTRE_V2_METHOD_LOOP8:
+ vec_start = __vectors_bhb_loop8_start;
+ vec_end = __vectors_bhb_loop8_end;
+ break;
+
+ case SPECTRE_V2_METHOD_BPIALL:
+ vec_start = __vectors_bhb_bpiall_start;
+ vec_end = __vectors_bhb_bpiall_end;
+ break;
+
+ default:
+ pr_err("CPU%u: unknown Spectre BHB state %d\n",
+ smp_processor_id(), method);
+ return SPECTRE_VULNERABLE;
+ }
+
+ copy_from_lma(vectors_page, vec_start, vec_end);
+ flush_vectors(vectors_page, 0, vec_end - vec_start);
+
+ return SPECTRE_MITIGATED;
+}
+#endif
+
void __init early_trap_init(void *vectors_base)
{
-#ifndef CONFIG_CPU_V7M
- unsigned long vectors = (unsigned long)vectors_base;
extern char __stubs_start[], __stubs_end[];
extern char __vectors_start[], __vectors_end[];
unsigned i;
@@ -834,17 +879,87 @@ void __init early_trap_init(void *vectors_base)
* into the vector page, mapped at 0xffff0000, and ensure these
* are visible to the instruction stream.
*/
- memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
- memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
+ copy_from_lma(vectors_base, __vectors_start, __vectors_end);
+ copy_from_lma(vectors_base + 0x1000, __stubs_start, __stubs_end);
kuser_init(vectors_base);
- flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
+ flush_vectors(vectors_base, 0, PAGE_SIZE * 2);
+}
#else /* ifndef CONFIG_CPU_V7M */
+void __init early_trap_init(void *vectors_base)
+{
/*
* on V7-M there is no need to copy the vector table to a dedicated
* memory area. The address is configurable and so a table in the kernel
* image can be used.
*/
+}
#endif
+
+#ifdef CONFIG_VMAP_STACK
+
+DECLARE_PER_CPU(u8 *, irq_stack_ptr);
+
+asmlinkage DEFINE_PER_CPU(u8 *, overflow_stack_ptr);
+
+static int __init allocate_overflow_stacks(void)
+{
+ u8 *stack;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ stack = (u8 *)__get_free_page(GFP_KERNEL);
+ if (WARN_ON(!stack))
+ return -ENOMEM;
+ per_cpu(overflow_stack_ptr, cpu) = &stack[OVERFLOW_STACK_SIZE];
+ }
+ return 0;
+}
+early_initcall(allocate_overflow_stacks);
+
+asmlinkage void handle_bad_stack(struct pt_regs *regs)
+{
+ unsigned long tsk_stk = (unsigned long)current->stack;
+#ifdef CONFIG_IRQSTACKS
+ unsigned long irq_stk = (unsigned long)raw_cpu_read(irq_stack_ptr);
+#endif
+ unsigned long ovf_stk = (unsigned long)raw_cpu_read(overflow_stack_ptr);
+
+ console_verbose();
+ pr_emerg("Insufficient stack space to handle exception!");
+
+ pr_emerg("Task stack: [0x%08lx..0x%08lx]\n",
+ tsk_stk, tsk_stk + THREAD_SIZE);
+#ifdef CONFIG_IRQSTACKS
+ pr_emerg("IRQ stack: [0x%08lx..0x%08lx]\n",
+ irq_stk - THREAD_SIZE, irq_stk);
+#endif
+ pr_emerg("Overflow stack: [0x%08lx..0x%08lx]\n",
+ ovf_stk - OVERFLOW_STACK_SIZE, ovf_stk);
+
+ die("kernel stack overflow", regs, 0);
+}
+
+#ifndef CONFIG_ARM_LPAE
+/*
+ * Normally, we rely on the logic in do_translation_fault() to update stale PMD
+ * entries covering the vmalloc space in a task's page tables when it first
+ * accesses the region in question. Unfortunately, this is not sufficient when
+ * the task stack resides in the vmalloc region, as do_translation_fault() is a
+ * C function that needs a stack to run.
+ *
+ * So we need to ensure that these PMD entries are up to date *before* the MM
+ * switch. As we already have some logic in the MM switch path that takes care
+ * of this, let's trigger it by bumping the counter every time the core vmalloc
+ * code modifies a PMD entry in the vmalloc region. Use release semantics on
+ * the store so that other CPUs observing the counter's new value are
+ * guaranteed to see the updated page table entries as well.
+ */
+void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
+{
+ if (start < VMALLOC_END && end > VMALLOC_START)
+ atomic_inc_return_release(&init_mm.context.vmalloc_seq);
}
+#endif
+#endif
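
A rough sketch of the consumer side this pairs with, assuming the MM
switch path re-syncs whenever the counters differ; the names below are
illustrative, not the exact mainline helpers:

	static inline void vmalloc_seq_check_sketch(struct mm_struct *mm)
	{
		/* acquire pairs with the release increment in
		 * arch_sync_kernel_mappings() */
		if (unlikely(atomic_read(&mm->context.vmalloc_seq) !=
			     atomic_read_acquire(&init_mm.context.vmalloc_seq)))
			sync_vmalloc_pmds(mm);	/* hypothetical helper */
	}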
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 0bee233fef9a..f60547dadc93 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/arm/kernel/unwind.c
*
* Copyright (C) 2008 ARM Limited
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- *
* Stack unwinding support for ARM
*
* An ARM EABI version of gcc is required to generate the unwind
@@ -31,9 +18,6 @@
#warning Your compiler does not have EABI support.
#warning ARM unwind is known to compile only with EABI compilers.
#warning Change compiler or disable ARM_UNWIND option.
-#elif (__GNUC__ == 4 && __GNUC_MINOR__ <= 2) && !defined(__clang__)
-#warning Your compiler is too buggy; it is known to not compile ARM unwind support.
-#warning Change compiler or disable ARM_UNWIND option.
#endif
#endif /* __CHECKER__ */
@@ -44,11 +28,14 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
+#include <linux/module.h>
#include <asm/stacktrace.h>
#include <asm/traps.h>
#include <asm/unwind.h>
+#include "reboot.h"
+
/* Dummy functions to avoid linker complaints */
void __aeabi_unwind_cpp_pr0(void)
{
@@ -69,6 +56,7 @@ struct unwind_ctrl_block {
unsigned long vrs[16]; /* virtual register set */
const unsigned long *insn; /* pointer to the current instructions word */
unsigned long sp_high; /* highest value of sp allowed */
+ unsigned long *lr_addr; /* address of LR value on the stack */
/*
* 1 : check for stack overflow for each register pop.
* 0 : save overhead if there is plenty of stack remaining.
@@ -93,7 +81,7 @@ extern const struct unwind_idx __start_unwind_idx[];
static const struct unwind_idx *__origin_unwind_idx;
extern const struct unwind_idx __stop_unwind_idx[];
-static DEFINE_SPINLOCK(unwind_lock);
+static DEFINE_RAW_SPINLOCK(unwind_lock);
static LIST_HEAD(unwind_tables);
/* Convert a prel31 symbol to an absolute address */
@@ -201,7 +189,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
/* module unwind tables */
struct unwind_table *table;
- spin_lock_irqsave(&unwind_lock, flags);
+ raw_spin_lock_irqsave(&unwind_lock, flags);
list_for_each_entry(table, &unwind_tables, list) {
if (addr >= table->begin_addr &&
addr < table->end_addr) {
@@ -213,7 +201,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
break;
}
}
- spin_unlock_irqrestore(&unwind_lock, flags);
+ raw_spin_unlock_irqrestore(&unwind_lock, flags);
}
pr_debug("%s: idx = %p\n", __func__, idx);
@@ -249,7 +237,13 @@ static int unwind_pop_register(struct unwind_ctrl_block *ctrl,
if (*vsp >= (unsigned long *)ctrl->sp_high)
return -URC_FAILURE;
- ctrl->vrs[reg] = *(*vsp)++;
+ /* Use READ_ONCE_NOCHECK here to avoid this memory access
+ * from being tracked by KASAN.
+ */
+ ctrl->vrs[reg] = READ_ONCE_NOCHECK(*(*vsp));
+ if (reg == 14)
+ ctrl->lr_addr = *vsp;
+ (*vsp)++;
return URC_OK;
}
@@ -268,8 +262,9 @@ static int unwind_exec_pop_subset_r4_to_r13(struct unwind_ctrl_block *ctrl,
mask >>= 1;
reg++;
}
- if (!load_sp)
+ if (!load_sp) {
ctrl->vrs[SP] = (unsigned long)vsp;
+ }
return URC_OK;
}
@@ -313,6 +308,29 @@ static int unwind_exec_pop_subset_r0_to_r3(struct unwind_ctrl_block *ctrl,
return URC_OK;
}
+static unsigned long unwind_decode_uleb128(struct unwind_ctrl_block *ctrl)
+{
+ unsigned long bytes = 0;
+ unsigned long insn;
+ unsigned long result = 0;
+
+ /*
+ * unwind_get_byte() will advance `ctrl` one instruction at a time, so
+ * loop until we get an instruction byte where bit 7 is not set.
+ *
+	 * Note: this decodes a maximum of 4 bytes to produce 28 bits of data,
+	 * where the maximum is 0xfffffff: that covers a vsp increment of
+	 * 1073742336 bytes, which is sufficient for unwinding the stack.
+ */
+ do {
+ insn = unwind_get_byte(ctrl);
+ result |= (insn & 0x7f) << (bytes * 7);
+ bytes++;
+ } while (!!(insn & 0x80) && (bytes != sizeof(result)));
+
+ return result;
+}
+
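
A worked example of the decoder above, runnable on its own: the byte
sequence 0x81 0x01 decodes to (0x81 & 0x7f) | ((0x01 & 0x7f) << 7) = 129,
and decoding stops because bit 7 of the second byte is clear. The 0xb2
handler below would then advance vsp by 0x204 + (129 << 2) = 0x408 bytes.

	static unsigned long uleb128_example(void)
	{
		const unsigned char buf[] = { 0x81, 0x01 };
		unsigned long result = 0;
		unsigned long bytes = 0;

		do {
			result |= (unsigned long)(buf[bytes] & 0x7f) << (bytes * 7);
		} while (buf[bytes++] & 0x80);

		return result;	/* == 129 */
	}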
/*
* Execute the current unwind instruction.
*/
@@ -325,9 +343,9 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
if ((insn & 0xc0) == 0x00)
ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;
- else if ((insn & 0xc0) == 0x40)
+ else if ((insn & 0xc0) == 0x40) {
ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
- else if ((insn & 0xf0) == 0x80) {
+ } else if ((insn & 0xf0) == 0x80) {
unsigned long mask;
insn = (insn << 8) | unwind_get_byte(ctrl);
@@ -342,9 +360,9 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
if (ret)
goto error;
} else if ((insn & 0xf0) == 0x90 &&
- (insn & 0x0d) != 0x0d)
+ (insn & 0x0d) != 0x0d) {
ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
- else if ((insn & 0xf0) == 0xa0) {
+ } else if ((insn & 0xf0) == 0xa0) {
ret = unwind_exec_pop_r4_to_rN(ctrl, insn);
if (ret)
goto error;
@@ -366,7 +384,7 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
if (ret)
goto error;
} else if (insn == 0xb2) {
- unsigned long uleb128 = unwind_get_byte(ctrl);
+ unsigned long uleb128 = unwind_decode_uleb128(ctrl);
ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
} else {
@@ -387,23 +405,32 @@ error:
*/
int unwind_frame(struct stackframe *frame)
{
- unsigned long low;
const struct unwind_idx *idx;
struct unwind_ctrl_block ctrl;
+ unsigned long sp_low;
	/* store the highest address on the stack to avoid crossing it */
- low = frame->sp;
- ctrl.sp_high = ALIGN(low, THREAD_SIZE);
+ sp_low = frame->sp;
+ ctrl.sp_high = ALIGN(sp_low - THREAD_SIZE, THREAD_ALIGN)
+ + THREAD_SIZE;
pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__,
frame->pc, frame->lr, frame->sp);
- if (!kernel_text_address(frame->pc))
- return -URC_FAILURE;
-
idx = unwind_find_idx(frame->pc);
if (!idx) {
- pr_warn("unwind: Index not found %08lx\n", frame->pc);
+ if (frame->pc && kernel_text_address(frame->pc)) {
+ if (in_module_plt(frame->pc) && frame->pc != frame->lr) {
+ /*
+ * Quoting Ard: Veneers only set PC using a
+ * PC+immediate LDR, and so they don't affect
+ * the state of the stack or the register file
+ */
+ frame->pc = frame->lr;
+ return URC_OK;
+ }
+ pr_warn("unwind: Index not found %08lx\n", frame->pc);
+ }
return -URC_FAILURE;
}
@@ -415,7 +442,20 @@ int unwind_frame(struct stackframe *frame)
if (idx->insn == 1)
/* can't unwind */
return -URC_FAILURE;
- else if ((idx->insn & 0x80000000) == 0)
+ else if (frame->pc == prel31_to_addr(&idx->addr_offset)) {
+ /*
+ * Unwinding is tricky when we're halfway through the prologue,
+ * since the stack frame that the unwinder expects may not be
+ * fully set up yet. However, one thing we do know for sure is
+ * that if we are unwinding from the very first instruction of
+ * a function, we are still effectively in the stack frame of
+ * the caller, and the unwind info has no relevance yet.
+ */
+ if (frame->pc == frame->lr)
+ return -URC_FAILURE;
+ frame->pc = frame->lr;
+ return URC_OK;
+ } else if ((idx->insn & 0x80000000) == 0)
/* prel31 to the unwind table */
ctrl.insn = (unsigned long *)prel31_to_addr(&idx->insn);
else if ((idx->insn & 0xff000000) == 0x80000000)
@@ -442,6 +482,16 @@ int unwind_frame(struct stackframe *frame)
ctrl.check_each_pop = 0;
+ if (prel31_to_addr(&idx->addr_offset) == (u32)&call_with_stack) {
+ /*
+ * call_with_stack() is the only place where we permit SP to
+ * jump from one stack to another, and since we know it is
+ * guaranteed to happen, set up the SP bounds accordingly.
+ */
+ sp_low = frame->fp;
+ ctrl.sp_high = ALIGN(frame->fp, THREAD_SIZE);
+ }
+
while (ctrl.entries > 0) {
int urc;
if ((ctrl.sp_high - ctrl.vrs[SP]) < sizeof(ctrl.vrs))
@@ -449,7 +499,7 @@ int unwind_frame(struct stackframe *frame)
urc = unwind_exec_insn(&ctrl);
if (urc < 0)
return urc;
- if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= ctrl.sp_high)
+ if (ctrl.vrs[SP] < sp_low || ctrl.vrs[SP] > ctrl.sp_high)
return -URC_FAILURE;
}
@@ -457,21 +507,25 @@ int unwind_frame(struct stackframe *frame)
ctrl.vrs[PC] = ctrl.vrs[LR];
/* check for infinite loop */
- if (frame->pc == ctrl.vrs[PC])
+ if (frame->pc == ctrl.vrs[PC] && frame->sp == ctrl.vrs[SP])
return -URC_FAILURE;
frame->fp = ctrl.vrs[FP];
frame->sp = ctrl.vrs[SP];
frame->lr = ctrl.vrs[LR];
frame->pc = ctrl.vrs[PC];
+ frame->lr_addr = ctrl.lr_addr;
return URC_OK;
}
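
For reference, prel31_to_addr(), called repeatedly in unwind_frame()
above, is defined earlier in this file; modulo comments it boils down to
sign-extending a 31-bit place-relative offset and adding the address of
the field itself:

	static unsigned long prel31_to_addr(const unsigned long *ptr)
	{
		/* sign-extend bit 30 so the 31-bit offset becomes a long */
		long offset = (((long)*ptr) << 1) >> 1;

		return (unsigned long)ptr + offset;
	}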
-void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl)
{
struct stackframe frame;
+ printk("%sCall trace: ", loglvl);
+
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
if (!tsk)
@@ -486,7 +540,12 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_stack_pointer;
frame.lr = (unsigned long)__builtin_return_address(0);
- frame.pc = (unsigned long)unwind_backtrace;
+ /* We are saving the stack and execution state at this
+ * point, so we should ensure that frame.pc is within
+ * this block of code.
+ */
+here:
+ frame.pc = (unsigned long)&&here;
} else {
/* task blocked in __switch_to */
frame.fp = thread_saved_fp(tsk);
@@ -506,7 +565,7 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
urc = unwind_frame(&frame);
if (urc < 0)
break;
- dump_backtrace_entry(where, frame.pc, frame.sp - 4);
+ dump_backtrace_entry(where, frame.pc, frame.sp - 4, loglvl);
}
}
@@ -529,9 +588,9 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
tab->begin_addr = text_addr;
tab->end_addr = text_addr + text_size;
- spin_lock_irqsave(&unwind_lock, flags);
+ raw_spin_lock_irqsave(&unwind_lock, flags);
list_add_tail(&tab->list, &unwind_tables);
- spin_unlock_irqrestore(&unwind_lock, flags);
+ raw_spin_unlock_irqrestore(&unwind_lock, flags);
return tab;
}
@@ -543,9 +602,9 @@ void unwind_table_del(struct unwind_table *tab)
if (!tab)
return;
- spin_lock_irqsave(&unwind_lock, flags);
+ raw_spin_lock_irqsave(&unwind_lock, flags);
list_del(&tab->list);
- spin_unlock_irqrestore(&unwind_lock, flags);
+ raw_spin_unlock_irqrestore(&unwind_lock, flags);
kfree(tab);
}
diff --git a/arch/arm/kernel/v7m.c b/arch/arm/kernel/v7m.c
index 4d2cba94f5cc..094c5c59fc72 100644
--- a/arch/arm/kernel/v7m.c
+++ b/arch/arm/kernel/v7m.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Uwe Kleine-Koenig for Pengutronix
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License version 2 as published by the
- * Free Software Foundation.
*/
#include <linux/io.h>
#include <linux/reboot.h>
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index a4d6dc0f2427..e38a30477f3d 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -1,23 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Adapted from arm64 version.
*
* Copyright (C) 2012 ARM Limited
* Copyright (C) 2015 Mentor Graphics Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/cache.h>
+#include <linux/vdso_datastore.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/kernel.h>
@@ -25,47 +15,28 @@
#include <linux/of.h>
#include <linux/printk.h>
#include <linux/slab.h>
-#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <asm/arch_timer.h>
#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
#include <asm/vdso.h>
-#include <asm/vdso_datapage.h>
#include <clocksource/arm_arch_timer.h>
+#include <vdso/helpers.h>
+#include <vdso/vsyscall.h>
#define MAX_SYMNAME 64
static struct page **vdso_text_pagelist;
+extern char vdso_start[], vdso_end[];
+
/* Total number of pages needed for the data and text portions of the VDSO. */
unsigned int vdso_total_pages __ro_after_init;
-/*
- * The VDSO data page.
- */
-static union vdso_data_store vdso_data_store __page_aligned_data;
-static struct vdso_data *vdso_data = &vdso_data_store.data;
-
-static struct page *vdso_data_page __ro_after_init;
-static const struct vm_special_mapping vdso_data_mapping = {
- .name = "[vvar]",
- .pages = &vdso_data_page,
-};
-
static int vdso_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma)
{
- unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
- unsigned long vdso_size;
-
- /* without VVAR page */
- vdso_size = (vdso_total_pages - 1) << PAGE_SHIFT;
-
- if (vdso_size != new_size)
- return -EINVAL;
-
current->mm->context.vdso = new_vma->vm_start;
return 0;
@@ -83,11 +54,9 @@ struct elfinfo {
char *dynstr; /* ptr to .dynstr section */
};
-/* Cached result of boot-time check for whether the arch timer exists,
- * and if so, whether the virtual counter is useable.
+/* Boot-time check for whether the arch timer exists, and if so,
+ * whether the virtual counter is usable.
*/
-static bool cntvct_ok __ro_after_init;
-
static bool __init cntvct_functional(void)
{
struct device_node *np;
@@ -102,6 +71,8 @@ static bool __init cntvct_functional(void)
*/
np = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
if (!np)
+ np = of_find_compatible_node(NULL, NULL, "arm,armv8-timer");
+ if (!np)
goto out_put;
if (of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
@@ -149,7 +120,7 @@ static Elf32_Sym * __init find_symbol(struct elfinfo *lib, const char *symname)
if (lib->dynsym[i].st_name == 0)
continue;
- strlcpy(name, lib->dynstr + lib->dynsym[i].st_name,
+ strscpy(name, lib->dynstr + lib->dynsym[i].st_name,
MAX_SYMNAME);
c = strchr(name, '@');
if (c)
@@ -186,9 +157,10 @@ static void __init patch_vdso(void *ehdr)
* want programs to incur the slight additional overhead of
* dispatching through the VDSO only to fall back to syscalls.
*/
- if (!cntvct_ok) {
+ if (!cntvct_functional()) {
vdso_nullpatch_one(&einfo, "__vdso_gettimeofday");
vdso_nullpatch_one(&einfo, "__vdso_clock_gettime");
+ vdso_nullpatch_one(&einfo, "__vdso_clock_gettime64");
}
}
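
For context, vdso_nullpatch_one() (defined just above this hunk and
unchanged by it) is believed to simply blank the symbol's name in
.dynsym, which also explains the st_name == 0 skip in find_symbol()
above. A sketch:

	static void __init vdso_nullpatch_one_sketch(struct elfinfo *lib,
						     const char *symname)
	{
		Elf32_Sym *sym;

		sym = find_symbol(lib, symname);
		if (!sym)
			return;

		/* an st_name of 0 makes the entry unresolvable by name */
		sym->st_name = 0;
	}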
@@ -197,13 +169,12 @@ static int __init vdso_init(void)
unsigned int text_pages;
int i;
- if (memcmp(&vdso_start, "\177ELF", 4)) {
+ if (memcmp(vdso_start, "\177ELF", 4)) {
pr_err("VDSO is not a valid ELF object!\n");
return -ENOEXEC;
}
- text_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
- pr_debug("vdso: %i text pages at base %p\n", text_pages, &vdso_start);
+ text_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
/* Allocate the VDSO text pagelist */
vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *),
@@ -211,42 +182,28 @@ static int __init vdso_init(void)
if (vdso_text_pagelist == NULL)
return -ENOMEM;
- /* Grab the VDSO data page. */
- vdso_data_page = virt_to_page(vdso_data);
-
/* Grab the VDSO text pages. */
for (i = 0; i < text_pages; i++) {
struct page *page;
- page = virt_to_page(&vdso_start + i * PAGE_SIZE);
+ page = virt_to_page(vdso_start + i * PAGE_SIZE);
vdso_text_pagelist[i] = page;
}
vdso_text_mapping.pages = vdso_text_pagelist;
- vdso_total_pages = 1; /* for the data/vvar page */
+ vdso_total_pages = VDSO_NR_PAGES; /* for the data/vvar pages */
vdso_total_pages += text_pages;
- cntvct_ok = cntvct_functional();
-
- patch_vdso(&vdso_start);
+ patch_vdso(vdso_start);
return 0;
}
arch_initcall(vdso_init);
-static int install_vvar(struct mm_struct *mm, unsigned long addr)
-{
- struct vm_area_struct *vma;
-
- vma = _install_special_mapping(mm, addr, PAGE_SIZE,
- VM_READ | VM_MAYREAD,
- &vdso_data_mapping);
+static_assert(__VDSO_PAGES == VDSO_NR_PAGES);
- return PTR_ERR_OR_ZERO(vma);
-}
-
-/* assumes mmap_sem is write-locked */
+/* assumes mmap_lock is write-locked */
void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
{
struct vm_area_struct *vma;
@@ -257,12 +214,12 @@ void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
if (vdso_text_pagelist == NULL)
return;
- if (install_vvar(mm, addr))
+ if (IS_ERR(vdso_install_vvar_mapping(mm, addr)))
return;
- /* Account for vvar page. */
- addr += PAGE_SIZE;
- len = (vdso_total_pages - 1) << PAGE_SHIFT;
+ /* Account for vvar pages. */
+ addr += VDSO_NR_PAGES * PAGE_SIZE;
+ len = (vdso_total_pages - VDSO_NR_PAGES) << PAGE_SHIFT;
vma = _install_special_mapping(mm, addr, len,
VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
@@ -272,84 +229,3 @@ void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
mm->context.vdso = addr;
}
-static void vdso_write_begin(struct vdso_data *vdata)
-{
- ++vdso_data->seq_count;
- smp_wmb(); /* Pairs with smp_rmb in vdso_read_retry */
-}
-
-static void vdso_write_end(struct vdso_data *vdata)
-{
- smp_wmb(); /* Pairs with smp_rmb in vdso_read_begin */
- ++vdso_data->seq_count;
-}
-
-static bool tk_is_cntvct(const struct timekeeper *tk)
-{
- if (!IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
- return false;
-
- if (!tk->tkr_mono.clock->archdata.vdso_direct)
- return false;
-
- return true;
-}
-
-/**
- * update_vsyscall - update the vdso data page
- *
- * Increment the sequence counter, making it odd, indicating to
- * userspace that an update is in progress. Update the fields used
- * for coarse clocks and, if the architected system timer is in use,
- * the fields used for high precision clocks. Increment the sequence
- * counter again, making it even, indicating to userspace that the
- * update is finished.
- *
- * Userspace is expected to sample seq_count before reading any other
- * fields from the data page. If seq_count is odd, userspace is
- * expected to wait until it becomes even. After copying data from
- * the page, userspace must sample seq_count again; if it has changed
- * from its previous value, userspace must retry the whole sequence.
- *
- * Calls to update_vsyscall are serialized by the timekeeping core.
- */
-void update_vsyscall(struct timekeeper *tk)
-{
- struct timespec64 *wtm = &tk->wall_to_monotonic;
-
- if (!cntvct_ok) {
- /* The entry points have been zeroed, so there is no
- * point in updating the data page.
- */
- return;
- }
-
- vdso_write_begin(vdso_data);
-
- vdso_data->tk_is_cntvct = tk_is_cntvct(tk);
- vdso_data->xtime_coarse_sec = tk->xtime_sec;
- vdso_data->xtime_coarse_nsec = (u32)(tk->tkr_mono.xtime_nsec >>
- tk->tkr_mono.shift);
- vdso_data->wtm_clock_sec = wtm->tv_sec;
- vdso_data->wtm_clock_nsec = wtm->tv_nsec;
-
- if (vdso_data->tk_is_cntvct) {
- vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
- vdso_data->xtime_clock_sec = tk->xtime_sec;
- vdso_data->xtime_clock_snsec = tk->tkr_mono.xtime_nsec;
- vdso_data->cs_mult = tk->tkr_mono.mult;
- vdso_data->cs_shift = tk->tkr_mono.shift;
- vdso_data->cs_mask = tk->tkr_mono.mask;
- }
-
- vdso_write_end(vdso_data);
-
- flush_dcache_page(virt_to_page(vdso_data));
-}
-
-void update_vsyscall_tz(void)
-{
- vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
- vdso_data->tz_dsttime = sys_tz.tz_dsttime;
- flush_dcache_page(virt_to_page(vdso_data));
-}
diff --git a/arch/arm/kernel/vmcore_info.c b/arch/arm/kernel/vmcore_info.c
new file mode 100644
index 000000000000..1437aba47787
--- /dev/null
+++ b/arch/arm/kernel/vmcore_info.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/vmcore_info.h>
+
+void arch_crash_save_vmcoreinfo(void)
+{
+#ifdef CONFIG_ARM_LPAE
+ VMCOREINFO_CONFIG(ARM_LPAE);
+#endif
+}
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index 8265b116218d..f2e8d4fac068 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* ld script to make ARM Linux kernel
* taken from the i386 version by Russell King
* Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
@@ -6,44 +7,13 @@
/* No __ro_after_init data in the .rodata section - which will always be ro */
#define RO_AFTER_INIT_DATA
-#include <asm-generic/vmlinux.lds.h>
+#include <linux/sizes.h>
+
+#include <asm/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
-#include <asm/memory.h>
#include <asm/page.h>
-
-#define PROC_INFO \
- . = ALIGN(4); \
- VMLINUX_SYMBOL(__proc_info_begin) = .; \
- *(.proc.info.init) \
- VMLINUX_SYMBOL(__proc_info_end) = .;
-
-#define IDMAP_TEXT \
- ALIGN_FUNCTION(); \
- VMLINUX_SYMBOL(__idmap_text_start) = .; \
- *(.idmap.text) \
- VMLINUX_SYMBOL(__idmap_text_end) = .; \
- . = ALIGN(PAGE_SIZE); \
- VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
- *(.hyp.idmap.text) \
- VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
-
-#ifdef CONFIG_HOTPLUG_CPU
-#define ARM_CPU_DISCARD(x)
-#define ARM_CPU_KEEP(x) x
-#else
-#define ARM_CPU_DISCARD(x) x
-#define ARM_CPU_KEEP(x)
-#endif
-
-#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
- defined(CONFIG_GENERIC_BUG)
-#define ARM_EXIT_KEEP(x) x
-#define ARM_EXIT_DISCARD(x)
-#else
-#define ARM_EXIT_KEEP(x)
-#define ARM_EXIT_DISCARD(x) x
-#endif
+#include <asm/mpu.h>
OUTPUT_ARCH(arm)
ENTRY(stext)
@@ -66,22 +36,13 @@ SECTIONS
* unwind sections get included.
*/
/DISCARD/ : {
- *(.ARM.exidx.exit.text)
- *(.ARM.extab.exit.text)
- ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
- ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
- ARM_EXIT_DISCARD(EXIT_TEXT)
- ARM_EXIT_DISCARD(EXIT_DATA)
- EXIT_CALL
-#ifndef CONFIG_MMU
- *(.text.fixup)
- *(__ex_table)
-#endif
-#ifndef CONFIG_SMP_ON_UP
+ ARM_DISCARD
*(.alt.smp.init)
+ *(.pv_table)
+#ifndef CONFIG_ARM_UNWIND
+ *(.ARM.exidx) *(.ARM.exidx.*)
+ *(.ARM.extab) *(.ARM.extab.*)
#endif
- *(.discard)
- *(.discard.*)
}
. = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
@@ -94,22 +55,7 @@ SECTIONS
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
- IDMAP_TEXT
- __exception_text_start = .;
- *(.exception.text)
- __exception_text_end = .;
- IRQENTRY_TEXT
- TEXT_TEXT
- SCHED_TEXT
- CPUIDLE_TEXT
- LOCK_TEXT
- KPROBES_TEXT
- *(.gnu.warning)
- *(.glue_7)
- *(.glue_7t)
- . = ALIGN(4);
- *(.got) /* Global offset table */
- ARM_CPU_KEEP(PROC_INFO)
+ ARM_TEXT
}
RO_DATA(PAGE_SIZE)
@@ -117,53 +63,17 @@ SECTIONS
. = ALIGN(4);
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
-#ifdef CONFIG_MMU
- *(__ex_table)
-#endif
+ ARM_MMU_KEEP(*(__ex_table))
__stop___ex_table = .;
}
#ifdef CONFIG_ARM_UNWIND
- /*
- * Stack unwinding tables
- */
- . = ALIGN(8);
- .ARM.unwind_idx : {
- __start_unwind_idx = .;
- *(.ARM.exidx*)
- __stop_unwind_idx = .;
- }
- .ARM.unwind_tab : {
- __start_unwind_tab = .;
- *(.ARM.extab*)
- __stop_unwind_tab = .;
- }
+ ARM_UNWIND_SECTIONS
#endif
- NOTES
-
_etext = .; /* End of text and rodata section */
- /*
- * The vectors and stubs are relocatable code, and the
- * only thing that matters is their relative offsets
- */
- __vectors_start = .;
- .vectors 0xffff0000 : AT(__vectors_start) {
- *(.vectors)
- }
- . = __vectors_start + SIZEOF(.vectors);
- __vectors_end = .;
-
- __stubs_start = .;
- .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {
- *(.stubs)
- }
- . = __stubs_start + SIZEOF(.stubs);
- __stubs_end = .;
-
- PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
-
+ ARM_VECTORS
INIT_TEXT_SECTION(8)
.exit.text : {
ARM_EXIT_KEEP(EXIT_TEXT)
@@ -173,7 +83,7 @@ SECTIONS
}
.init.arch.info : {
__arch_info_begin = .;
- *(.arch.info.init)
+ KEEP(*(.arch.info.init))
__arch_info_end = .;
}
.init.tagtable : {
@@ -181,130 +91,72 @@ SECTIONS
*(.taglist.init)
__tagtable_end = .;
}
-#ifdef CONFIG_SMP_ON_UP
- .init.smpalt : {
- __smpalt_begin = .;
- *(.alt.smp.init)
- __smpalt_end = .;
- }
-#endif
- .init.pv_table : {
- __pv_table_begin = .;
- *(.pv_table)
- __pv_table_end = .;
- }
- .init.data : {
+ .init.rodata : {
INIT_SETUP(16)
INIT_CALLS
CON_INITCALL
- SECURITY_INITCALL
INIT_RAM_FS
}
-#ifdef CONFIG_SMP
- PERCPU_SECTION(L1_CACHE_BYTES)
+#ifdef CONFIG_ARM_MPU
+ . = ALIGN(SZ_128K);
#endif
-
_exiprom = .; /* End of XIP ROM area */
- __data_loc = ALIGN(4); /* location in binary */
- . = PAGE_OFFSET + TEXT_OFFSET;
- .data : AT(__data_loc) {
- _data = .; /* address in memory */
- _sdata = .;
-
- /*
- * first, the init task union, aligned
- * to an 8192 byte boundary.
- */
- INIT_TASK_DATA(THREAD_SIZE)
+/*
+ * From this point, stuff is considered writable and will be copied to RAM
+ */
+ __data_loc = ALIGN(4); /* location in file */
+ . = PAGE_OFFSET + TEXT_OFFSET; /* location in memory */
+#undef LOAD_OFFSET
+#define LOAD_OFFSET (PAGE_OFFSET + TEXT_OFFSET - __data_loc)
+
+ . = ALIGN(THREAD_SIZE);
+ _sdata = .;
+ RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+ .data.ro_after_init : AT(ADDR(.data.ro_after_init) - LOAD_OFFSET) {
+ *(.data..ro_after_init)
+ }
+ _edata = .;
- . = ALIGN(PAGE_SIZE);
- __init_begin = .;
+ . = ALIGN(PAGE_SIZE);
+ __init_begin = .;
+ .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
INIT_DATA
+ }
+ .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
ARM_EXIT_KEEP(EXIT_DATA)
- . = ALIGN(PAGE_SIZE);
- __init_end = .;
-
- *(.data..ro_after_init)
-
- NOSAVE_DATA
- CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
- READ_MOSTLY_DATA(L1_CACHE_BYTES)
-
- /*
- * and the usual data section
- */
- DATA_DATA
- CONSTRUCTORS
-
- _edata = .;
}
- _edata_loc = __data_loc + SIZEOF(.data);
-
- BUG_TABLE
+#ifdef CONFIG_SMP
+ PERCPU_SECTION(L1_CACHE_BYTES)
+#endif
#ifdef CONFIG_HAVE_TCM
- /*
- * We align everything to a page boundary so we can
- * free it after init has commenced and TCM contents have
- * been copied to its destination.
- */
- .tcm_start : {
- . = ALIGN(PAGE_SIZE);
- __tcm_start = .;
- __itcm_start = .;
- }
-
- /*
- * Link these to the ITCM RAM
- * Put VMA to the TCM address and LMA to the common RAM
- * and we'll upload the contents from RAM to TCM and free
- * the used RAM after that.
- */
- .text_itcm ITCM_OFFSET : AT(__itcm_start)
- {
- __sitcm_text = .;
- *(.tcm.text)
- *(.tcm.rodata)
- . = ALIGN(4);
- __eitcm_text = .;
- }
+ ARM_TCM
+#endif
/*
- * Reset the dot pointer, this is needed to create the
- * relative __dtcm_start below (to be used as extern in code).
+ * End of copied data. We need a dummy section to get its LMA.
+ * It is placed before the final ALIGN() because trailing padding is not
+ * stored in the binary file and would be useless to copy.
*/
- . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);
-
- .dtcm_start : {
- __dtcm_start = .;
- }
-
- /* TODO: add remainder of ITCM as well, that can be used for data! */
- .data_dtcm DTCM_OFFSET : AT(__dtcm_start)
- {
- . = ALIGN(4);
- __sdtcm_data = .;
- *(.tcm.data)
- . = ALIGN(4);
- __edtcm_data = .;
- }
+ .data.endmark : AT(ADDR(.data.endmark) - LOAD_OFFSET) { }
+ _edata_loc = LOADADDR(.data.endmark);
- /* Reset the dot pointer or the linker gets confused */
- . = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);
+ . = ALIGN(PAGE_SIZE);
+ __init_end = .;
- /* End marker for freeing TCM copy in linked object */
- .tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)){
- . = ALIGN(PAGE_SIZE);
- __tcm_end = .;
- }
+ BSS_SECTION(0, 0, 8)
+#ifdef CONFIG_ARM_MPU
+ . = ALIGN(PMSAv8_MINALIGN);
#endif
-
- BSS_SECTION(0, 0, 0)
_end = .;
STABS_DEBUG
+ DWARF_DEBUG
+ ARM_DETAILS
+
+ ARM_ASSERTS
}
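
Note that .data..ro_after_init now gets its own output section directly after RW_DATA(): it still lives in the RAM copy, but it can be write-protected once init finishes. A sketch of what ends up there, assuming the __ro_after_init annotation from <linux/cache.h> (the variable and initcall are hypothetical):

	#include <linux/cache.h>
	#include <linux/init.h>

	/* hypothetical tunable: writable during boot, read-only afterwards */
	static unsigned long boot_tunable __ro_after_init;

	static int __init boot_tunable_setup(void)
	{
		boot_tunable = 42;	/* fine: init has not finished yet */
		return 0;
	}
	core_initcall(boot_tunable_setup);

	/* after mark_rodata_ro(), any store to boot_tunable would fault */
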
/*
@@ -313,12 +165,32 @@ SECTIONS
* binutils is too old (for other reasons as well)
*/
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
+#ifndef CONFIG_COMPILE_TEST
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
+#endif
+
+#ifdef CONFIG_XIP_DEFLATED_DATA
+/*
+ * The .bss is used as a stack area for __inflate_kernel_data() whose stack
+ * frame is 9568 bytes. Make sure it has extra room left.
+ */
+ASSERT((_end - __bss_start) >= 12288, ".bss too small for CONFIG_XIP_DEFLATED_DATA")
+#endif
+#if defined(CONFIG_ARM_MPU) && !defined(CONFIG_COMPILE_TEST)
/*
- * The HYP init code can't be more than a page long,
- * and should not cross a page boundary.
- * The above comment applies as well.
+ * PMSAv7 places restrictions on region base address and size, so we
+ * have to enforce minimal alignment here. With a weaker alignment
+ * restriction on _xiprom, the XIP address space would likely span
+ * multiple MPU regions; we could then end up reprogramming the very
+ * MPU region we are executing from with settings that do not cover
+ * the reprogramming code itself, and as soon as the update took
+ * effect we would be executing from the background region, which is
+ * XN. Alignment to 1 MiB should suit most users. _exiprom only needs
+ * 1/8 of that (128 KiB), since each of a PMSAv7 region's eight equal
+ * subregions can be disabled individually to trim the tail.
*/
-ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
- "HYP init code too big or misaligned")
+ASSERT(!(_xiprom & (SZ_1M - 1)), "XIP start address may cause MPU programming issues")
+ASSERT(!(_exiprom & (SZ_128K - 1)), "XIP end address may cause MPU programming issues")
+#endif
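
Redefining LOAD_OFFSET after __data_loc is what makes the later AT(ADDR(section) - LOAD_OFFSET) expressions resolve to each section's position in the ROM image while its VMA stays at the RAM address, and the empty .data.endmark section exists purely so LOADADDR() can name the end of the region worth copying. A sketch of the resulting ROM-to-RAM copy, assuming the symbols defined by this script (the real copy is done in assembly from head.S, before any C runs):

	#include <string.h>

	extern char __data_loc[];	/* LMA: start of writable data in ROM */
	extern char _sdata[];		/* VMA: its run-time home in RAM */
	extern char _edata_loc[];	/* LMA: end of the data worth copying */

	static void copy_xip_data(void)
	{
		/* trailing padding was deliberately left out of the image */
		memcpy(_sdata, __data_loc, _edata_loc - __data_loc);
	}
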
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index c83a7ba737d6..d592a203f9c6 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* ld script to make ARM Linux kernel
* taken from the i386 version by Russell King
* Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
@@ -7,50 +8,12 @@
#include "vmlinux-xip.lds.S"
#else
-#include <asm-generic/vmlinux.lds.h>
+#include <linux/pgtable.h>
+#include <asm/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
-#include <asm/memory.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
-
-#define PROC_INFO \
- . = ALIGN(4); \
- VMLINUX_SYMBOL(__proc_info_begin) = .; \
- *(.proc.info.init) \
- VMLINUX_SYMBOL(__proc_info_end) = .;
-
-#define HYPERVISOR_TEXT \
- VMLINUX_SYMBOL(__hyp_text_start) = .; \
- *(.hyp.text) \
- VMLINUX_SYMBOL(__hyp_text_end) = .;
-
-#define IDMAP_TEXT \
- ALIGN_FUNCTION(); \
- VMLINUX_SYMBOL(__idmap_text_start) = .; \
- *(.idmap.text) \
- VMLINUX_SYMBOL(__idmap_text_end) = .; \
- . = ALIGN(PAGE_SIZE); \
- VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
- *(.hyp.idmap.text) \
- VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
-
-#ifdef CONFIG_HOTPLUG_CPU
-#define ARM_CPU_DISCARD(x)
-#define ARM_CPU_KEEP(x) x
-#else
-#define ARM_CPU_DISCARD(x) x
-#define ARM_CPU_KEEP(x)
-#endif
-
-#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
- defined(CONFIG_GENERIC_BUG) || defined(CONFIG_JUMP_LABEL)
-#define ARM_EXIT_KEEP(x) x
-#define ARM_EXIT_DISCARD(x)
-#else
-#define ARM_EXIT_KEEP(x)
-#define ARM_EXIT_DISCARD(x) x
-#endif
+#include <asm/mpu.h>
OUTPUT_ARCH(arm)
ENTRY(stext)
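
Both scripts now pull their shared pieces (ARM_DISCARD, ARM_TEXT, ARM_UNWIND_SECTIONS, ARM_VECTORS, ARM_MMU_KEEP and so on) from asm/vmlinux.lds.h instead of open-coding them twice. A sketch of the ARM_MMU_KEEP pairing the hunks below rely on, inferred from the #ifdef CONFIG_MMU blocks it replaces rather than quoted from the header:

	#ifdef CONFIG_MMU
	#define ARM_MMU_KEEP(x)		x	/* MMU: fixup entries are used */
	#define ARM_MMU_DISCARD(x)
	#else
	#define ARM_MMU_KEEP(x)			/* !MMU: no fixups, drop them */
	#define ARM_MMU_DISCARD(x)	x
	#endif
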
@@ -73,25 +36,17 @@ SECTIONS
* unwind sections get included.
*/
/DISCARD/ : {
- *(.ARM.exidx.exit.text)
- *(.ARM.extab.exit.text)
- ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
- ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
- ARM_EXIT_DISCARD(EXIT_TEXT)
- ARM_EXIT_DISCARD(EXIT_DATA)
- EXIT_CALL
-#ifndef CONFIG_MMU
- *(.text.fixup)
- *(__ex_table)
-#endif
+ ARM_DISCARD
#ifndef CONFIG_SMP_ON_UP
*(.alt.smp.init)
#endif
- *(.discard)
- *(.discard.*)
+#ifndef CONFIG_ARM_UNWIND
+ *(.ARM.exidx) *(.ARM.exidx.*)
+ *(.ARM.extab) *(.ARM.extab.*)
+#endif
}
- . = PAGE_OFFSET + TEXT_OFFSET;
+ . = KERNEL_OFFSET + TEXT_OFFSET;
.head.text : {
_text = .;
HEAD_TEXT
@@ -101,26 +56,12 @@ SECTIONS
. = ALIGN(1<<SECTION_SHIFT);
#endif
+#ifdef CONFIG_ARM_MPU
+ . = ALIGN(PMSAv8_MINALIGN);
+#endif
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
- IDMAP_TEXT
- __exception_text_start = .;
- *(.exception.text)
- __exception_text_end = .;
- IRQENTRY_TEXT
- SOFTIRQENTRY_TEXT
- TEXT_TEXT
- SCHED_TEXT
- CPUIDLE_TEXT
- LOCK_TEXT
- HYPERVISOR_TEXT
- KPROBES_TEXT
- *(.gnu.warning)
- *(.glue_7)
- *(.glue_7t)
- . = ALIGN(4);
- *(.got) /* Global offset table */
- ARM_CPU_KEEP(PROC_INFO)
+ ARM_TEXT
}
#ifdef CONFIG_DEBUG_ALIGN_RODATA
@@ -133,31 +74,14 @@ SECTIONS
. = ALIGN(4);
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
-#ifdef CONFIG_MMU
- *(__ex_table)
-#endif
+ ARM_MMU_KEEP(*(__ex_table))
__stop___ex_table = .;
}
#ifdef CONFIG_ARM_UNWIND
- /*
- * Stack unwinding tables
- */
- . = ALIGN(8);
- .ARM.unwind_idx : {
- __start_unwind_idx = .;
- *(.ARM.exidx*)
- __stop_unwind_idx = .;
- }
- .ARM.unwind_tab : {
- __start_unwind_tab = .;
- *(.ARM.extab*)
- __stop_unwind_tab = .;
- }
+ ARM_UNWIND_SECTIONS
#endif
- NOTES
-
#ifdef CONFIG_STRICT_KERNEL_RWX
. = ALIGN(1<<SECTION_SHIFT);
#else
@@ -165,26 +89,7 @@ SECTIONS
#endif
__init_begin = .;
- /*
- * The vectors and stubs are relocatable code, and the
- * only thing that matters is their relative offsets
- */
- __vectors_start = .;
- .vectors 0xffff0000 : AT(__vectors_start) {
- *(.vectors)
- }
- . = __vectors_start + SIZEOF(.vectors);
- __vectors_end = .;
-
- __stubs_start = .;
- .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {
- *(.stubs)
- }
- . = __stubs_start + SIZEOF(.stubs);
- __stubs_end = .;
-
- PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
-
+ ARM_VECTORS
INIT_TEXT_SECTION(8)
.exit.text : {
ARM_EXIT_KEEP(EXIT_TEXT)
@@ -194,7 +99,7 @@ SECTIONS
}
.init.arch.info : {
__arch_info_begin = .;
- *(.arch.info.init)
+ KEEP(*(.arch.info.init))
__arch_info_end = .;
}
.init.tagtable : {
@@ -211,17 +116,12 @@ SECTIONS
#endif
.init.pv_table : {
__pv_table_begin = .;
- *(.pv_table)
+ KEEP(*(.pv_table))
__pv_table_end = .;
}
- .init.data : {
- INIT_DATA
- INIT_SETUP(16)
- INIT_CALLS
- CON_INITCALL
- SECURITY_INITCALL
- INIT_RAM_FS
- }
+
+ INIT_DATA_SECTION(16)
+
.exit.data : {
ARM_EXIT_KEEP(EXIT_DATA)
}
@@ -230,101 +130,32 @@ SECTIONS
PERCPU_SECTION(L1_CACHE_BYTES)
#endif
+#ifdef CONFIG_HAVE_TCM
+ ARM_TCM
+#endif
+
#ifdef CONFIG_STRICT_KERNEL_RWX
. = ALIGN(1<<SECTION_SHIFT);
#else
- . = ALIGN(THREAD_SIZE);
+ . = ALIGN(THREAD_ALIGN);
#endif
__init_end = .;
- __data_loc = .;
- .data : AT(__data_loc) {
- _data = .; /* address in memory */
- _sdata = .;
-
- /*
- * first, the init task union, aligned
- * to an 8192 byte boundary.
- */
- INIT_TASK_DATA(THREAD_SIZE)
-
- NOSAVE_DATA
- CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
- READ_MOSTLY_DATA(L1_CACHE_BYTES)
-
- /*
- * and the usual data section
- */
- DATA_DATA
- CONSTRUCTORS
-
- _edata = .;
- }
- _edata_loc = __data_loc + SIZEOF(.data);
-
- BUG_TABLE
-
-#ifdef CONFIG_HAVE_TCM
- /*
- * We align everything to a page boundary so we can
- * free it after init has commenced and TCM contents have
- * been copied to its destination.
- */
- .tcm_start : {
- . = ALIGN(PAGE_SIZE);
- __tcm_start = .;
- __itcm_start = .;
- }
-
- /*
- * Link these to the ITCM RAM
- * Put VMA to the TCM address and LMA to the common RAM
- * and we'll upload the contents from RAM to TCM and free
- * the used RAM after that.
- */
- .text_itcm ITCM_OFFSET : AT(__itcm_start)
- {
- __sitcm_text = .;
- *(.tcm.text)
- *(.tcm.rodata)
- . = ALIGN(4);
- __eitcm_text = .;
- }
-
- /*
- * Reset the dot pointer, this is needed to create the
- * relative __dtcm_start below (to be used as extern in code).
- */
- . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);
-
- .dtcm_start : {
- __dtcm_start = .;
- }
-
- /* TODO: add remainder of ITCM as well, that can be used for data! */
- .data_dtcm DTCM_OFFSET : AT(__dtcm_start)
- {
- . = ALIGN(4);
- __sdtcm_data = .;
- *(.tcm.data)
- . = ALIGN(4);
- __edtcm_data = .;
- }
-
- /* Reset the dot pointer or the linker gets confused */
- . = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);
-
- /* End marker for freeing TCM copy in linked object */
- .tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)){
- . = ALIGN(PAGE_SIZE);
- __tcm_end = .;
- }
-#endif
+ _sdata = .;
+ RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)
+ _edata = .;
BSS_SECTION(0, 0, 0)
+#ifdef CONFIG_ARM_MPU
+ . = ALIGN(PMSAv8_MINALIGN);
+#endif
_end = .;
STABS_DEBUG
+ DWARF_DEBUG
+ ARM_DETAILS
+
+ ARM_ASSERTS
}
#ifdef CONFIG_STRICT_KERNEL_RWX
@@ -342,14 +173,8 @@ __start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT);
* binutils is too old (for other reasons as well)
*/
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
+#ifndef CONFIG_COMPILE_TEST
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
-
-/*
- * The HYP init code can't be more than a page long,
- * and should not cross a page boundary.
- * The above comment applies as well.
- */
-ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
- "HYP init code too big or misaligned")
+#endif
#endif /* CONFIG_XIP_KERNEL */
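
Turning the bare *(.arch.info.init) into KEEP(*(.arch.info.init)) follows the same reasoning as the __ex_table change: machine records are registered purely by placement in the section and are only ever reached by walking the __arch_info_begin/__arch_info_end pair, so the linker sees no reference that would keep them alive. A sketch of that walk, loosely modelled on the machine-type lookup in early setup (it assumes struct machine_desc from asm/mach/arch.h):

	#include <asm/mach/arch.h>

	/* return the record for a machine-type number, or NULL */
	static const struct machine_desc *lookup_machine(unsigned int nr)
	{
		const struct machine_desc *m;

		for (m = __arch_info_begin; m < __arch_info_end; m++)
			if (m->nr == nr)
				return m;
		return NULL;
	}
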
diff --git a/arch/arm/kernel/xscale-cp0.c b/arch/arm/kernel/xscale-cp0.c
index 77a2eef72115..00d00d3aae97 100644
--- a/arch/arm/kernel/xscale-cp0.c
+++ b/arch/arm/kernel/xscale-cp0.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/kernel/xscale-cp0.c
*
* XScale DSP and iWMMXt coprocessor context switching and handling
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/types.h>
@@ -169,6 +166,7 @@ static int __init xscale_cp0_init(void)
pr_info("XScale iWMMXt coprocessor detected.\n");
elf_hwcap |= HWCAP_IWMMXT;
thread_register_notifier(&iwmmxt_notifier_block);
+ register_iwmmxt_undef_handler();
#endif
} else {
pr_info("XScale DSP coprocessor detected.\n");