Diffstat (limited to 'arch/arm/kernel')
78 files changed, 2840 insertions, 2862 deletions
diff --git a/arch/arm/kernel/.gitignore b/arch/arm/kernel/.gitignore
index c5f676c3c224..bbb90f92d051 100644
--- a/arch/arm/kernel/.gitignore
+++ b/arch/arm/kernel/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
 vmlinux.lds
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 8b679e2ca3c3..89a77e3f51d2 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -10,6 +10,7 @@ ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_insn.o = -pg
 CFLAGS_REMOVE_patch.o = -pg
+CFLAGS_REMOVE_unwind.o = -pg
 endif
 
 CFLAGS_REMOVE_return_address.o = -pg
@@ -17,10 +18,13 @@ CFLAGS_REMOVE_return_address.o = -pg
 # Object file lists.
 
 obj-y		:= elf.o entry-common.o irq.o opcodes.o \
-		   process.o ptrace.o reboot.o \
+		   process.o ptrace.o reboot.o io.o \
 		   setup.o signal.o sigreturn_codes.o \
 		   stacktrace.o sys_arm.o time.o traps.o
 
+KASAN_SANITIZE_stacktrace.o := n
+KASAN_SANITIZE_traps.o := n
+
 ifneq ($(CONFIG_ARM_UNWIND),y)
 obj-$(CONFIG_FRAME_POINTER)	+= return_address.o
 endif
@@ -41,7 +45,6 @@ obj-$(CONFIG_ISA_DMA_API)	+= dma.o
 obj-$(CONFIG_FIQ)		+= fiq.o fiqasm.o
 obj-$(CONFIG_MODULES)		+= armksyms.o module.o
 obj-$(CONFIG_ARM_MODULE_PLTS)	+= module-plts.o
-obj-$(CONFIG_ISA_DMA)		+= dma-isa.o
 obj-$(CONFIG_PCI)		+= bios32.o isa.o
 obj-$(CONFIG_ARM_CPU_SUSPEND)	+= sleep.o suspend.o
 obj-$(CONFIG_HIBERNATION)	+= hibernate.o
@@ -53,10 +56,11 @@ obj-$(CONFIG_HAVE_ARM_SCU)	+= smp_scu.o
 obj-$(CONFIG_HAVE_ARM_TWD)	+= smp_twd.o
 obj-$(CONFIG_ARM_ARCH_TIMER)	+= arch_timer.o
 obj-$(CONFIG_FUNCTION_TRACER)	+= entry-ftrace.o
-obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o insn.o
-obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o insn.o
+obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o insn.o patch.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o insn.o patch.o
 obj-$(CONFIG_JUMP_LABEL)	+= jump_label.o insn.o patch.o
-obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_KEXEC_CORE)	+= machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_VMCORE_INFO)	+= vmcore_info.o
 # Main staffs in KPROBES are in arch/arm/probes/ .
 obj-$(CONFIG_KPROBES)		+= patch.o insn.o
 obj-$(CONFIG_OABI_COMPAT)	+= sys_oabi-compat.o
@@ -67,14 +71,11 @@ obj-$(CONFIG_HAVE_TCM)		+= tcm.o
 obj-$(CONFIG_OF)		+= devtree.o
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
 obj-$(CONFIG_SWP_EMULATE)	+= swp_emulate.o
-CFLAGS_swp_emulate.o		:= -Wa,-march=armv7-a
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
 obj-$(CONFIG_CPU_XSCALE)	+= xscale-cp0.o
 obj-$(CONFIG_CPU_XSC3)		+= xscale-cp0.o
 obj-$(CONFIG_CPU_MOHAWK)	+= xscale-cp0.o
-obj-$(CONFIG_CPU_PJ4)		+= pj4-cp0.o
-obj-$(CONFIG_CPU_PJ4B)		+= pj4-cp0.o
 obj-$(CONFIG_IWMMXT)		+= iwmmxt.o
 obj-$(CONFIG_PERF_EVENTS)	+= perf_regs.o perf_callchain.o
 obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event_xscale.o perf_event_v6.o \
@@ -83,15 +84,12 @@ AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt
 obj-$(CONFIG_ARM_CPU_TOPOLOGY)	+= topology.o
 obj-$(CONFIG_VDSO)		+= vdso.o
 obj-$(CONFIG_EFI)		+= efi.o
-
-ifneq ($(CONFIG_ARCH_EBSA110),y)
-  obj-y		+= io.o
-endif
 obj-$(CONFIG_PARAVIRT)	+= paravirt.o
 
-head-y			:= head$(MMUEXT).o
+obj-y			+= head$(MMUEXT).o
 obj-$(CONFIG_DEBUG_LL)	+= debug.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
+obj-$(CONFIG_ARM_PATCH_PHYS_VIRT)	+= phys2virt.o
 
 # This is executed very early using a temporary stack when no memory allocator
 # nor global data is available. Everything has to be allocated on the stack.
@@ -99,11 +97,12 @@ CFLAGS_head-inflate-data.o := $(call cc-option,-Wframe-larger-than=10240)
 obj-$(CONFIG_XIP_DEFLATED_DATA) += head-inflate-data.o
 
 obj-$(CONFIG_ARM_VIRT_EXT)	+= hyp-stub.o
-AFLAGS_hyp-stub.o		:=-Wa,-march=armv7-a
 ifeq ($(CONFIG_ARM_PSCI),y)
 obj-$(CONFIG_SMP)		+= psci_smp.o
 endif
 
 obj-$(CONFIG_HAVE_ARM_SMCCC)	+= smccc-call.o
+obj-$(CONFIG_GENERIC_CPU_VULNERABILITIES) += spectre.o
+
-extra-y := $(head-y) vmlinux.lds
+extra-y := vmlinux.lds
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 98bdea51089d..82e96ac83684 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -7,7 +7,6 @@
 #include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/string.h>
-#include <linux/cryptohash.h>
 #include <linux/delay.h>
 #include <linux/in6.h>
 #include <linux/syscalls.h>
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index c773b829ee8e..4915662842ff 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -11,21 +11,22 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
-#ifdef CONFIG_KVM_ARM_HOST
-#include <linux/kvm_host.h>
-#endif
 #include <asm/cacheflush.h>
+#include <asm/kexec-internal.h>
 #include <asm/glue-df.h>
 #include <asm/glue-pf.h>
 #include <asm/mach/arch.h>
 #include <asm/thread_info.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/mpu.h>
 #include <asm/procinfo.h>
 #include <asm/suspend.h>
-#include <asm/vdso_datapage.h>
 #include <asm/hardware/cache-l2x0.h>
 #include <linux/kbuild.h>
+#include <linux/arm-smccc.h>
+
+#include <vdso/datapage.h>
+
 #include "signal.h"
 
 /*
@@ -34,15 +35,6 @@
 #if defined(__APCS_26__)
 #error Sorry, your compiler targets APCS-26 but this kernel requires APCS-32
 #endif
-/*
- * GCC 4.8.0-4.8.2: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58854
- * miscompiles find_get_entry(), and can result in EXT3 and EXT4
- * filesystem corruption (possibly other FS too).
- */
-#if defined(GCC_VERSION) && GCC_VERSION >= 40800 && GCC_VERSION < 40803
-#error Your compiler is too buggy; it is known to miscompile kernels
-#error and result in filesystem corruption and oopses.
-#endif
 
 int main(void)
 {
@@ -53,12 +45,10 @@ int main(void)
   BLANK();
   DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
   DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
-  DEFINE(TI_ADDR_LIMIT,		offsetof(struct thread_info, addr_limit));
-  DEFINE(TI_TASK,		offsetof(struct thread_info, task));
   DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
   DEFINE(TI_CPU_DOMAIN,		offsetof(struct thread_info, cpu_domain));
   DEFINE(TI_CPU_SAVE,		offsetof(struct thread_info, cpu_context));
-  DEFINE(TI_USED_CP,		offsetof(struct thread_info, used_cp));
+  DEFINE(TI_ABI_SYSCALL,	offsetof(struct thread_info, abi_syscall));
   DEFINE(TI_TP_VALUE,		offsetof(struct thread_info, tp_value));
   DEFINE(TI_FPSTATE,		offsetof(struct thread_info, fpstate));
 #ifdef CONFIG_VFP
@@ -67,19 +57,13 @@ int main(void)
   DEFINE(VFP_CPU,		offsetof(union vfp_state, hard.cpu));
 #endif
 #endif
+  DEFINE(SOFTIRQ_DISABLE_OFFSET,SOFTIRQ_DISABLE_OFFSET);
 #ifdef CONFIG_ARM_THUMBEE
   DEFINE(TI_THUMBEE_STATE,	offsetof(struct thread_info, thumbee_state));
 #endif
 #ifdef CONFIG_IWMMXT
   DEFINE(TI_IWMMXT_STATE,	offsetof(struct thread_info, fpstate.iwmmxt));
 #endif
-#ifdef CONFIG_CRUNCH
-  DEFINE(TI_CRUNCH_STATE,	offsetof(struct thread_info, crunchstate));
-#endif
-#ifdef CONFIG_STACKPROTECTOR_PER_TASK
-  DEFINE(TI_STACK_CANARY,	offsetof(struct thread_info, stack_canary));
-#endif
-  DEFINE(THREAD_SZ_ORDER,	THREAD_SIZE_ORDER);
   BLANK();
   DEFINE(S_R0,			offsetof(struct pt_regs, ARM_r0));
   DEFINE(S_R1,			offsetof(struct pt_regs, ARM_r1));
@@ -101,7 +85,6 @@ int main(void)
   DEFINE(S_OLD_R0,		offsetof(struct pt_regs, ARM_ORIG_r0));
   DEFINE(PT_REGS_SIZE,		sizeof(struct pt_regs));
   DEFINE(SVC_DACR,		offsetof(struct svc_pt_regs, dacr));
-  DEFINE(SVC_ADDR_LIMIT,	offsetof(struct svc_pt_regs, addr_limit));
   DEFINE(SVC_REGS_SIZE,		sizeof(struct svc_pt_regs));
   BLANK();
   DEFINE(SIGFRAME_RC3_OFFSET,	offsetof(struct sigframe, retcode[3]));
@@ -159,6 +142,8 @@ int main(void)
   DEFINE(SLEEP_SAVE_SP_PHYS,	offsetof(struct sleep_save_sp, save_ptr_stash_phys));
   DEFINE(SLEEP_SAVE_SP_VIRT,	offsetof(struct sleep_save_sp, save_ptr_stash));
 #endif
+  DEFINE(ARM_SMCCC_QUIRK_ID_OFFS,	offsetof(struct arm_smccc_quirk, id));
+  DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS,	offsetof(struct arm_smccc_quirk, state));
   BLANK();
   DEFINE(DMA_BIDIRECTIONAL,	DMA_BIDIRECTIONAL);
   DEFINE(DMA_TO_DEVICE,		DMA_TO_DEVICE);
@@ -167,14 +152,6 @@ int main(void)
   DEFINE(CACHE_WRITEBACK_ORDER, __CACHE_WRITEBACK_ORDER);
   DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE);
   BLANK();
-#ifdef CONFIG_KVM_ARM_HOST
-  DEFINE(VCPU_GUEST_CTXT,	offsetof(struct kvm_vcpu, arch.ctxt));
-  DEFINE(VCPU_HOST_CTXT,	offsetof(struct kvm_vcpu, arch.host_cpu_context));
-  DEFINE(CPU_CTXT_VFP,		offsetof(struct kvm_cpu_context, vfp));
-  DEFINE(CPU_CTXT_GP_REGS,	offsetof(struct kvm_cpu_context, gp_regs));
-  DEFINE(GP_REGS_USR,		offsetof(struct kvm_regs, usr_regs));
-#endif
-  BLANK();
 #ifdef CONFIG_VDSO
   DEFINE(VDSO_DATA_SIZE,	sizeof(union vdso_data_store));
 #endif
@@ -190,5 +167,9 @@ int main(void)
   DEFINE(MPU_RGN_PRBAR,	offsetof(struct mpu_rgn, prbar));
   DEFINE(MPU_RGN_PRLAR,	offsetof(struct mpu_rgn, prlar));
 #endif
+  DEFINE(KEXEC_START_ADDR,	offsetof(struct kexec_relocate_data, kexec_start_address));
+  DEFINE(KEXEC_INDIR_PAGE,	offsetof(struct kexec_relocate_data, kexec_indirection_page));
+  DEFINE(KEXEC_MACH_TYPE,	offsetof(struct kexec_relocate_data, kexec_mach_type));
+  DEFINE(KEXEC_R2,		offsetof(struct kexec_relocate_data, kexec_r2));
   return 0;
 }
diff --git a/arch/arm/kernel/atags.h b/arch/arm/kernel/atags.h
index 067e12edc341..f2819c25b602 100644
--- a/arch/arm/kernel/atags.h
+++ b/arch/arm/kernel/atags.h
@@ -2,11 +2,11 @@ void convert_to_tag_list(struct tag *tags);
 
 #ifdef CONFIG_ATAGS
-const struct machine_desc *setup_machine_tags(phys_addr_t __atags_pointer,
+const struct machine_desc *setup_machine_tags(void *__atags_vaddr,
 					      unsigned int machine_nr);
 #else
 static inline const struct machine_desc * __init __noreturn
-setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
+setup_machine_tags(void *__atags_vaddr, unsigned int machine_nr)
 {
 	early_print("no ATAGS support: can't continue\n");
 	while (true);
diff --git a/arch/arm/kernel/atags_parse.c b/arch/arm/kernel/atags_parse.c
index ce02f92f4ab2..4ec591bde3df 100644
--- a/arch/arm/kernel/atags_parse.c
+++ b/arch/arm/kernel/atags_parse.c
@@ -69,18 +69,18 @@ static int __init parse_tag_mem32(const struct tag *tag)
 
 __tagtable(ATAG_MEM, parse_tag_mem32);
 
-#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
+#if defined(CONFIG_ARCH_FOOTBRIDGE) && defined(CONFIG_VGA_CONSOLE)
 static int __init parse_tag_videotext(const struct tag *tag)
 {
-	screen_info.orig_x            = tag->u.videotext.x;
-	screen_info.orig_y            = tag->u.videotext.y;
-	screen_info.orig_video_page   = tag->u.videotext.video_page;
-	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
-	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
-	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
-	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
-	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
-	screen_info.orig_video_points = tag->u.videotext.video_points;
+	vgacon_screen_info.orig_x            = tag->u.videotext.x;
+	vgacon_screen_info.orig_y            = tag->u.videotext.y;
+	vgacon_screen_info.orig_video_page   = tag->u.videotext.video_page;
+	vgacon_screen_info.orig_video_mode   = tag->u.videotext.video_mode;
+	vgacon_screen_info.orig_video_cols   = tag->u.videotext.video_cols;
+	vgacon_screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
+	vgacon_screen_info.orig_video_lines  = tag->u.videotext.video_lines;
+	vgacon_screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
+	vgacon_screen_info.orig_video_points = tag->u.videotext.video_points;
 
 	return 0;
 }
@@ -91,8 +91,6 @@ __tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
 static int __init parse_tag_ramdisk(const struct tag *tag)
 {
 	rd_image_start = tag->u.ramdisk.start;
-	rd_doload = (tag->u.ramdisk.flags & 1) == 0;
-	rd_prompt = (tag->u.ramdisk.flags & 2) == 0;
 
 	if (tag->u.ramdisk.size)
 		rd_size = tag->u.ramdisk.size;
@@ -129,7 +127,7 @@ static int __init parse_tag_cmdline(const struct tag *tag)
 #elif defined(CONFIG_CMDLINE_FORCE)
 	pr_warn("Ignoring tag cmdline (using the default kernel command line)\n");
 #else
-	strlcpy(default_command_line, tag->u.cmdline.cmdline,
+	strscpy(default_command_line, tag->u.cmdline.cmdline,
 		COMMAND_LINE_SIZE);
 #endif
 	return 0;
@@ -176,7 +174,7 @@ static void __init squash_mem_tags(struct tag *tag)
 }
 
 const struct machine_desc * __init
-setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
+setup_machine_tags(void *atags_vaddr, unsigned int machine_nr)
 {
 	struct tag *tags = (struct tag *)&default_tags;
 	const struct machine_desc *mdesc = NULL, *p;
@@ -197,8 +195,8 @@ setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
 	if (!mdesc)
 		return NULL;
 
-	if (__atags_pointer)
-		tags = phys_to_virt(__atags_pointer);
+	if (atags_vaddr)
+		tags = atags_vaddr;
 	else if (mdesc->atag_offset)
 		tags = (void *)(PAGE_OFFSET + mdesc->atag_offset);
 
@@ -226,7 +224,7 @@ setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
 	}
 
 	/* parse_early_param needs a boot_command_line */
-	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);
+	strscpy(boot_command_line, from, COMMAND_LINE_SIZE);
 
 	return mdesc;
 }
diff --git a/arch/arm/kernel/atags_proc.c b/arch/arm/kernel/atags_proc.c
index 312cb89ec364..cd09f8ab93e3 100644
--- a/arch/arm/kernel/atags_proc.c
+++ b/arch/arm/kernel/atags_proc.c
@@ -7,19 +7,19 @@
 
 struct buffer {
 	size_t size;
-	char data[];
+	char data[] __counted_by(size);
 };
 
 static ssize_t atags_read(struct file *file, char __user *buf,
 			  size_t count, loff_t *ppos)
 {
-	struct buffer *b = PDE_DATA(file_inode(file));
+	struct buffer *b = pde_data(file_inode(file));
 
 	return simple_read_from_buffer(buf, count, ppos, b->data, b->size);
 }
 
-static const struct file_operations atags_fops = {
-	.read = atags_read,
-	.llseek = default_llseek,
+static const struct proc_ops atags_proc_ops = {
+	.proc_read	= atags_read,
+	.proc_lseek	= default_llseek,
 };
 
 #define BOOT_PARAMS_SIZE 1536
@@ -42,7 +42,7 @@ static int __init init_atags_procfs(void)
 	size_t size;
 
 	if (tag->hdr.tag != ATAG_CORE) {
-		pr_info("No ATAGs?");
+		pr_info("No ATAGs?\n");
 		return -EINVAL;
 	}
 
@@ -54,14 +54,14 @@ static int __init init_atags_procfs(void)
 
 	WARN_ON(tag->hdr.tag != ATAG_NONE);
 
-	b = kmalloc(sizeof(*b) + size, GFP_KERNEL);
+	b = kmalloc(struct_size(b, data, size), GFP_KERNEL);
 	if (!b)
 		goto nomem;
 
 	b->size = size;
 	memcpy(b->data, atags_copy, size);
 
-	tags_entry = proc_create_data("atags", 0400, NULL, &atags_fops, b);
+	tags_entry = proc_create_data("atags", 0400, NULL, &atags_proc_ops, b);
 	if (!tags_entry)
 		goto nomem;
 
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index ed46ca69813d..d334c7fb672b 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -142,15 +142,15 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_89C940F,
  */
 static void pci_fixup_dec21285(struct pci_dev *dev)
 {
-	int i;
-
 	if (dev->devfn == 0) {
+		struct resource *r;
+
 		dev->class &= 0xff;
 		dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
-		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
-			dev->resource[i].start = 0;
-			dev->resource[i].end = 0;
-			dev->resource[i].flags = 0;
+		pci_dev_for_each_resource(dev, r) {
+			r->start = 0;
+			r->end = 0;
+			r->flags = 0;
 		}
 	}
 }
@@ -162,13 +162,11 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, pci_fixup_d
 static void pci_fixup_ide_bases(struct pci_dev *dev)
 {
 	struct resource *r;
-	int i;
 
 	if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
 		return;
 
-	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
-		r = dev->resource + i;
+	pci_dev_for_each_resource(dev, r) {
 		if ((r->start & ~0x80) == 0x374) {
 			r->start |= 2;
 			r->end = r->start;
@@ -252,23 +250,6 @@ static void pci_fixup_cy82c693(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, pci_fixup_cy82c693);
 
-static void pci_fixup_it8152(struct pci_dev *dev)
-{
-	int i;
-	/* fixup for ITE 8152 devices */
-	/* FIXME: add defines for class 0x68000 and 0x80103 */
-	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST ||
-	    dev->class == 0x68000 ||
-	    dev->class == 0x80103) {
-		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
-			dev->resource[i].start = 0;
-			dev->resource[i].end = 0;
-			dev->resource[i].flags = 0;
-		}
-	}
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8152, pci_fixup_it8152);
-
 /*
  * If the bus contains any of these devices, then we must not turn on
  * parity checking of any kind.  Currently this is CyberPro 20x0 only.
@@ -411,8 +392,7 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 	return irq;
 }
 
-static int pcibios_init_resource(int busnr, struct pci_sys_data *sys,
-				 int io_optional)
+static int pcibios_init_resource(int busnr, struct pci_sys_data *sys)
 {
 	int ret;
 	struct resource_entry *window;
@@ -422,14 +402,6 @@ static int pcibios_init_resource(int busnr, struct pci_sys_data *sys,
 			 &iomem_resource, sys->mem_offset);
 	}
 
-	/*
-	 * If a platform says I/O port support is optional, we don't add
-	 * the default I/O space.  The platform is responsible for adding
-	 * any I/O space it needs.
-	 */
-	if (io_optional)
-		return 0;
-
 	resource_list_for_each_entry(window, &sys->resources)
 		if (resource_type(window->res) == IORESOURCE_IO)
 			return 0;
@@ -479,7 +451,7 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
 
 		if (ret > 0) {
 
-			ret = pcibios_init_resource(nr, sys, hw->io_optional);
+			ret = pcibios_init_resource(nr, sys);
 			if (ret)  {
 				pci_free_host_bridge(bridge);
 				break;
@@ -497,9 +469,6 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
 		bridge->sysdata = sys;
 		bridge->busnr = sys->busnr;
 		bridge->ops = hw->ops;
-		bridge->msi = hw->msi_ctrl;
-		bridge->align_resource =
-			hw->align_resource;
 
 		ret = pci_scan_root_bus_bridge(bridge);
 	}
diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c
index 14c8dbbb7d2d..087bce6ec8e9 100644
--- a/arch/arm/kernel/bugs.c
+++ b/arch/arm/kernel/bugs.c
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/init.h>
+#include <linux/cpu.h>
 #include <asm/bugs.h>
 #include <asm/proc-fns.h>
 
@@ -11,7 +12,7 @@ void check_other_bugs(void)
 #endif
 }
 
-void __init check_bugs(void)
+void __init arch_cpu_finalize_init(void)
 {
 	check_writebuffer_bugs();
 	check_other_bugs();
diff --git a/arch/arm/kernel/cpuidle.c b/arch/arm/kernel/cpuidle.c
index 093368e0d020..fba1f8bb03b5 100644
--- a/arch/arm/kernel/cpuidle.c
+++ b/arch/arm/kernel/cpuidle.c
@@ -5,13 +5,12 @@
 
 #include <linux/cpuidle.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <asm/cpuidle.h>
 
 extern struct of_cpuidle_method __cpuidle_method_of_table[];
 
 static const struct of_cpuidle_method __cpuidle_method_of_table_sentinel
-	__used __section(__cpuidle_method_of_table_end);
+	__used __section("__cpuidle_method_of_table_end");
 
 static struct cpuidle_ops cpuidle_ops[NR_CPUS] __ro_after_init;
 
@@ -26,8 +25,8 @@ static struct cpuidle_ops cpuidle_ops[NR_CPUS] __ro_after_init;
  *
  * Returns the index passed as parameter
  */
-int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
-		struct cpuidle_driver *drv, int index)
+__cpuidle int arm_cpuidle_simple_enter(struct cpuidle_device *dev, struct
+				       cpuidle_driver *drv, int index)
 {
 	cpu_do_idle();
 
diff --git a/arch/arm/kernel/crash_dump.c b/arch/arm/kernel/crash_dump.c
index 53cb92435392..938bd932df9a 100644
--- a/arch/arm/kernel/crash_dump.c
+++ b/arch/arm/kernel/crash_dump.c
@@ -14,22 +14,10 @@
 #include <linux/crash_dump.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
+#include <linux/uio.h>
 
-/**
- * copy_oldmem_page() - copy one page from old kernel memory
- * @pfn: page frame number to be copied
- * @buf: buffer where the copied page is placed
- * @csize: number of bytes to copy
- * @offset: offset in bytes into the page
- * @userbuf: if set, @buf is int he user address space
- *
- * This function copies one page from old kernel memory into buffer pointed by
- * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes
- * copied or negative error in case of failure.
- */
-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
-			 size_t csize, unsigned long offset,
-			 int userbuf)
+ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
+			 size_t csize, unsigned long offset)
 {
 	void *vaddr;
 
@@ -40,14 +28,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 	if (!vaddr)
 		return -ENOMEM;
 
-	if (userbuf) {
-		if (copy_to_user(buf, vaddr + offset, csize)) {
-			iounmap(vaddr);
-			return -EFAULT;
-		}
-	} else {
-		memcpy(buf, vaddr + offset, csize);
-	}
+	csize = copy_to_iter(vaddr + offset, csize, iter);
 
 	iounmap(vaddr);
 	return csize;
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index e112072b579d..d92f44bdf438 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -89,11 +89,18 @@ ENTRY(printascii)
 2:		teq	r1, #'\n'
 		bne	3f
 		mov	r1, #'\r'
-		waituart r2, r3
+#ifdef CONFIG_DEBUG_UART_FLOW_CONTROL
+		waituartcts r2, r3
+#endif
+		waituarttxrdy r2, r3
 		senduart r1, r3
 		busyuart r2, r3
 		mov	r1, #'\n'
-3:		waituart r2, r3
+3:
+#ifdef CONFIG_DEBUG_UART_FLOW_CONTROL
+		waituartcts r2, r3
+#endif
+		waituarttxrdy r2, r3
 		senduart r1, r3
 		busyuart r2, r3
 		b	1b
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 39c978698406..fdb74e64206a 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -13,7 +13,6 @@
 #include <linux/of.h>
 #include <linux/of_fdt.h>
 #include <linux/of_irq.h>
-#include <linux/of_platform.h>
 #include <linux/smp.h>
 
 #include <asm/cputype.h>
@@ -29,7 +28,7 @@
 extern struct of_cpu_method __cpu_method_of_table[];
 
 static const struct of_cpu_method __cpu_method_of_table_sentinel
-	__used __section(__cpu_method_of_table_end);
+	__used __section("__cpu_method_of_table_end");
 
 
 static int __init set_smp_ops_by_method(struct device_node *node)
@@ -84,33 +83,15 @@ void __init arm_dt_init_cpu_maps(void)
 		return;
 
 	for_each_of_cpu_node(cpu) {
-		const __be32 *cell;
-		int prop_bytes;
-		u32 hwid;
+		u32 hwid = of_get_cpu_hwid(cpu, 0);
 
 		pr_debug(" * %pOF...\n", cpu);
-		/*
-		 * A device tree containing CPU nodes with missing "reg"
-		 * properties is considered invalid to build the
-		 * cpu_logical_map.
-		 */
-		cell = of_get_property(cpu, "reg", &prop_bytes);
-		if (!cell || prop_bytes < sizeof(*cell)) {
-			pr_debug(" * %pOF missing reg property\n", cpu);
-			of_node_put(cpu);
-			return;
-		}
 
 		/*
 		 * Bits n:24 must be set to 0 in the DT since the reg property
 		 * defines the MPIDR[23:0].
 		 */
-		do {
-			hwid = be32_to_cpu(*cell++);
-			prop_bytes -= sizeof(*cell);
-		} while (!hwid && prop_bytes > 0);
-
-		if (prop_bytes || (hwid & ~MPIDR_HWID_BITMASK)) {
+		if (hwid & ~MPIDR_HWID_BITMASK) {
 			of_node_put(cpu);
 			return;
 		}
@@ -203,25 +184,23 @@ static const void * __init arch_get_next_mach(const char *const **match)
 
 /**
  * setup_machine_fdt - Machine setup when an dtb was passed to the kernel
- * @dt_phys: physical address of dt blob
+ * @dt_virt: virtual address of dt blob
 *
 * If a dtb was passed to the kernel in r2, then use it to choose the
 * correct machine_desc and to setup the system.
 */
-const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
+const struct machine_desc * __init setup_machine_fdt(void *dt_virt)
 {
 	const struct machine_desc *mdesc, *mdesc_best = NULL;
 
-#if defined(CONFIG_ARCH_MULTIPLATFORM) || defined(CONFIG_ARM_SINGLE_ARMV7M)
 	DT_MACHINE_START(GENERIC_DT, "Generic DT based system")
 		.l2c_aux_val = 0x0,
 		.l2c_aux_mask = ~0x0,
 	MACHINE_END
 
 	mdesc_best = &__mach_desc_GENERIC_DT;
-#endif
 
-	if (!dt_phys || !early_init_dt_verify(phys_to_virt(dt_phys)))
+	if (!dt_virt || !early_init_dt_verify(dt_virt))
 		return NULL;
 
 	mdesc = of_flat_dt_match_machine(mdesc_best, arch_get_next_mach);
diff --git a/arch/arm/kernel/dma-isa.c b/arch/arm/kernel/dma-isa.c
deleted file mode 100644
index 2d90ecce5a11..000000000000
--- a/arch/arm/kernel/dma-isa.c
+++ /dev/null
@@ -1,225 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- *  linux/arch/arm/kernel/dma-isa.c
- *
- *  Copyright (C) 1999-2000 Russell King
- *
- *  ISA DMA primitives
- *  Taken from various sources, including:
- *   linux/include/asm/dma.h: Defines for using and allocating dma channels.
- *     Written by Hennus Bergman, 1992.
- *     High DMA channel support & info by Hannu Savolainen and John Boyd,
- *     Nov. 1992.
- *   arch/arm/kernel/dma-ebsa285.c
- *   Copyright (C) 1998 Phil Blundell
- */
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/io.h>
-
-#include <asm/dma.h>
-#include <asm/mach/dma.h>
-
-#define ISA_DMA_MASK	0
-#define ISA_DMA_MODE	1
-#define ISA_DMA_CLRFF	2
-#define ISA_DMA_PGHI	3
-#define ISA_DMA_PGLO	4
-#define ISA_DMA_ADDR	5
-#define ISA_DMA_COUNT	6
-
-static unsigned int isa_dma_port[8][7] = {
-	/* MASK   MODE   CLRFF  PAGE_HI PAGE_LO ADDR COUNT */
-	{  0x0a,  0x0b,  0x0c,  0x487,  0x087,  0x00, 0x01 },
-	{  0x0a,  0x0b,  0x0c,  0x483,  0x083,  0x02, 0x03 },
-	{  0x0a,  0x0b,  0x0c,  0x481,  0x081,  0x04, 0x05 },
-	{  0x0a,  0x0b,  0x0c,  0x482,  0x082,  0x06, 0x07 },
-	{  0xd4,  0xd6,  0xd8,  0x000,  0x000,  0xc0, 0xc2 },
-	{  0xd4,  0xd6,  0xd8,  0x48b,  0x08b,  0xc4, 0xc6 },
-	{  0xd4,  0xd6,  0xd8,  0x489,  0x089,  0xc8, 0xca },
-	{  0xd4,  0xd6,  0xd8,  0x48a,  0x08a,  0xcc, 0xce }
-};
-
-static int isa_get_dma_residue(unsigned int chan, dma_t *dma)
-{
-	unsigned int io_port = isa_dma_port[chan][ISA_DMA_COUNT];
-	int count;
-
-	count = 1 + inb(io_port);
-	count |= inb(io_port) << 8;
-
-	return chan < 4 ? count : (count << 1);
-}
-
-static struct device isa_dma_dev = {
-	.init_name		= "fallback device",
-	.coherent_dma_mask	= ~(dma_addr_t)0,
-	.dma_mask		= &isa_dma_dev.coherent_dma_mask,
-};
-
-static void isa_enable_dma(unsigned int chan, dma_t *dma)
-{
-	if (dma->invalid) {
-		unsigned long address, length;
-		unsigned int mode;
-		enum dma_data_direction direction;
-
-		mode = (chan & 3) | dma->dma_mode;
-		switch (dma->dma_mode & DMA_MODE_MASK) {
-		case DMA_MODE_READ:
-			direction = DMA_FROM_DEVICE;
-			break;
-
-		case DMA_MODE_WRITE:
-			direction = DMA_TO_DEVICE;
-			break;
-
-		case DMA_MODE_CASCADE:
-			direction = DMA_BIDIRECTIONAL;
-			break;
-
-		default:
-			direction = DMA_NONE;
-			break;
-		}
-
-		if (!dma->sg) {
-			/*
-			 * Cope with ISA-style drivers which expect cache
-			 * coherence.
-			 */
-			dma->sg = &dma->buf;
-			dma->sgcount = 1;
-			dma->buf.length = dma->count;
-			dma->buf.dma_address = dma_map_single(&isa_dma_dev,
-				dma->addr, dma->count,
-				direction);
-		}
-
-		address = dma->buf.dma_address;
-		length  = dma->buf.length - 1;
-
-		outb(address >> 16, isa_dma_port[chan][ISA_DMA_PGLO]);
-		outb(address >> 24, isa_dma_port[chan][ISA_DMA_PGHI]);
-
-		if (chan >= 4) {
-			address >>= 1;
-			length >>= 1;
-		}
-
-		outb(0, isa_dma_port[chan][ISA_DMA_CLRFF]);
-
-		outb(address, isa_dma_port[chan][ISA_DMA_ADDR]);
-		outb(address >> 8, isa_dma_port[chan][ISA_DMA_ADDR]);
-
-		outb(length, isa_dma_port[chan][ISA_DMA_COUNT]);
-		outb(length >> 8, isa_dma_port[chan][ISA_DMA_COUNT]);
-
-		outb(mode, isa_dma_port[chan][ISA_DMA_MODE]);
-		dma->invalid = 0;
-	}
-	outb(chan & 3, isa_dma_port[chan][ISA_DMA_MASK]);
-}
-
-static void isa_disable_dma(unsigned int chan, dma_t *dma)
-{
-	outb(chan | 4, isa_dma_port[chan][ISA_DMA_MASK]);
-}
-
-static struct dma_ops isa_dma_ops = {
-	.type		= "ISA",
-	.enable		= isa_enable_dma,
-	.disable	= isa_disable_dma,
-	.residue	= isa_get_dma_residue,
-};
-
-static struct resource dma_resources[] = { {
-	.name	= "dma1",
-	.start	= 0x0000,
-	.end	= 0x000f
-}, {
-	.name	= "dma low page",
-	.start	= 0x0080,
-	.end	= 0x008f
-}, {
-	.name	= "dma2",
-	.start	= 0x00c0,
-	.end	= 0x00df
-}, {
-	.name	= "dma high page",
-	.start	= 0x0480,
-	.end	= 0x048f
-} };
-
-static dma_t isa_dma[8];
-
-/*
- * ISA DMA always starts at channel 0
- */
-void __init isa_init_dma(void)
-{
-	/*
-	 * Try to autodetect presence of an ISA DMA controller.
-	 * We do some minimal initialisation, and check that
-	 * channel 0's DMA address registers are writeable.
-	 */
-	outb(0xff, 0x0d);
-	outb(0xff, 0xda);
-
-	/*
-	 * Write high and low address, and then read them back
-	 * in the same order.
-	 */
-	outb(0x55, 0x00);
-	outb(0xaa, 0x00);
-
-	if (inb(0) == 0x55 && inb(0) == 0xaa) {
-		unsigned int chan, i;
-
-		for (chan = 0; chan < 8; chan++) {
-			isa_dma[chan].d_ops = &isa_dma_ops;
-			isa_disable_dma(chan, NULL);
-		}
-
-		outb(0x40, 0x0b);
-		outb(0x41, 0x0b);
-		outb(0x42, 0x0b);
-		outb(0x43, 0x0b);
-
-		outb(0xc0, 0xd6);
-		outb(0x41, 0xd6);
-		outb(0x42, 0xd6);
-		outb(0x43, 0xd6);
-
-		outb(0, 0xd4);
-
-		outb(0x10, 0x08);
-		outb(0x10, 0xd0);
-
-		/*
-		 * Is this correct?  According to my documentation, it
-		 * doesn't appear to be.  It should be:
-		 * outb(0x3f, 0x40b); outb(0x3f, 0x4d6);
-		 */
-		outb(0x30, 0x40b);
-		outb(0x31, 0x40b);
-		outb(0x32, 0x40b);
-		outb(0x33, 0x40b);
-		outb(0x31, 0x4d6);
-		outb(0x32, 0x4d6);
-		outb(0x33, 0x4d6);
-
-		for (i = 0; i < ARRAY_SIZE(dma_resources); i++)
-			request_resource(&ioport_resource, dma_resources + i);
-
-		for (chan = 0; chan < 8; chan++) {
-			int ret = isa_dma_add(chan, &isa_dma[chan]);
-			if (ret)
-				pr_err("ISADMA%u: unable to register: %d\n",
-				       chan, ret);
-		}
-
-		request_dma(DMA_ISA_CASCADE, "cascade");
-	}
-}
diff --git a/arch/arm/kernel/efi.c b/arch/arm/kernel/efi.c
index e57dbcc89123..6f9ec7d28a71 100644
--- a/arch/arm/kernel/efi.c
+++ b/arch/arm/kernel/efi.c
@@ -4,6 +4,9 @@
 */
 
 #include <linux/efi.h>
+#include <linux/memblock.h>
+#include <linux/screen_info.h>
+
 #include <asm/efi.h>
 #include <asm/mach/map.h>
 #include <asm/mmu_context.h>
@@ -22,7 +25,8 @@ static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
 }
 
 int __init efi_set_mapping_permissions(struct mm_struct *mm,
-				       efi_memory_desc_t *md)
+				       efi_memory_desc_t *md,
+				       bool ignored)
 {
 	unsigned long base, size;
 
@@ -70,6 +74,57 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
 	 * If stricter permissions were specified, apply them now.
 	 */
 	if (md->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))
-		return efi_set_mapping_permissions(mm, md);
+		return efi_set_mapping_permissions(mm, md, false);
 	return 0;
 }
+
+static unsigned long __initdata cpu_state_table = EFI_INVALID_TABLE_ADDR;
+
+const efi_config_table_type_t efi_arch_tables[] __initconst = {
+	{LINUX_EFI_ARM_CPU_STATE_TABLE_GUID, &cpu_state_table},
+	{}
+};
+
+static void __init load_cpu_state_table(void)
+{
+	if (cpu_state_table != EFI_INVALID_TABLE_ADDR) {
+		struct efi_arm_entry_state *state;
+		bool dump_state = true;
+
+		state = early_memremap_ro(cpu_state_table,
+					  sizeof(struct efi_arm_entry_state));
+		if (state == NULL) {
+			pr_warn("Unable to map CPU entry state table.\n");
+			return;
+		}
+
+		if ((state->sctlr_before_ebs & 1) == 0)
+			pr_warn(FW_BUG "EFI stub was entered with MMU and Dcache disabled, please fix your firmware!\n");
+		else if ((state->sctlr_after_ebs & 1) == 0)
+			pr_warn(FW_BUG "ExitBootServices() returned with MMU and Dcache disabled, please fix your firmware!\n");
+		else
+			dump_state = false;
+
+		if (dump_state || efi_enabled(EFI_DBG)) {
+			pr_info("CPSR at EFI stub entry        : 0x%08x\n",
+				state->cpsr_before_ebs);
+			pr_info("SCTLR at EFI stub entry       : 0x%08x\n",
+				state->sctlr_before_ebs);
+			pr_info("CPSR after ExitBootServices() : 0x%08x\n",
+				state->cpsr_after_ebs);
+			pr_info("SCTLR after ExitBootServices(): 0x%08x\n",
+				state->sctlr_after_ebs);
+		}
+		early_memunmap(state, sizeof(struct efi_arm_entry_state));
+	}
+}
+
+void __init arm_efi_init(void)
+{
+	efi_init();
+
+	/* ARM does not permit early mappings to persist across paging_init() */
+	efi_memmap_unmap();
+
+	load_cpu_state_table();
+}
diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c
index 182422981386..254ab7138c85 100644
--- a/arch/arm/kernel/elf.c
+++ b/arch/arm/kernel/elf.c
@@ -78,13 +78,32 @@ void elf_set_personality(const struct elf32_hdr *x)
 EXPORT_SYMBOL(elf_set_personality);
 
 /*
- * Set READ_IMPLIES_EXEC if:
- *  - the binary requires an executable stack
- *  - we're running on a CPU which doesn't support NX.
+ * An executable for which elf_read_implies_exec() returns TRUE will
+ * have the READ_IMPLIES_EXEC personality flag set automatically.
+ *
+ * The decision process for determining the results are:
+ *
+ *                 CPU: | lacks NX*  | has NX     |
+ * ELF:                 |            |            |
+ * ---------------------|------------|------------|
+ * missing PT_GNU_STACK | exec-all   | exec-all   |
+ * PT_GNU_STACK == RWX  | exec-all   | exec-stack |
+ * PT_GNU_STACK == RW   | exec-all   | exec-none  |
+ *
+ * exec-all  : all PROT_READ user mappings are executable, except when
+ *             backed by files on a noexec-filesystem.
+ * exec-none : only PROT_EXEC user mappings are executable.
+ * exec-stack: only the stack and PROT_EXEC user mappings are executable.
+ *
+ * *this column has no architectural effect: NX markings are ignored by
+ *  hardware, but may have behavioral effects when "wants X" collides with
+ *  "cannot be X" constraints in memory permission flags, as in
+ *  https://lkml.kernel.org/r/20190418055759.GA3155@mellanox.com
+ *
 */
 int arm_elf_read_implies_exec(int executable_stack)
 {
-	if (executable_stack != EXSTACK_DISABLE_X)
+	if (executable_stack == EXSTACK_DEFAULT)
 		return 1;
 	if (cpu_architecture() < CPU_ARCH_ARMv6)
 		return 1;
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 858d4e541532..6150a716828c 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -15,44 +15,54 @@
 #include <linux/init.h>
 
 #include <asm/assembler.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #include <asm/glue-df.h>
 #include <asm/glue-pf.h>
 #include <asm/vfpmacros.h>
-#ifndef CONFIG_GENERIC_IRQ_MULTI_HANDLER
-#include <mach/entry-macro.S>
-#endif
 #include <asm/thread_notify.h>
 #include <asm/unwind.h>
 #include <asm/unistd.h>
 #include <asm/tls.h>
 #include <asm/system_info.h>
+#include <asm/uaccess-asm.h>
 
 #include "entry-header.S"
-#include <asm/entry-macro-multi.S>
 #include <asm/probes.h>
 
 /*
 * Interrupt handling.
 */
-	.macro	irq_handler
-#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
-	ldr	r1, =handle_arch_irq
-	mov	r0, sp
-	badr	lr, 9997f
-	ldr	pc, [r1]
-#else
-	arch_irq_handler_default
+	.macro	irq_handler, from_user:req
+	mov	r1, sp
+	ldr_this_cpu r2, irq_stack_ptr, r2, r3
+	.if	\from_user == 0
+	@
+	@ If we took the interrupt while running in the kernel, we may already
+	@ be using the IRQ stack, so revert to the original value in that case.
+	@
+	subs	r3, r2, r1		@ SP above bottom of IRQ stack?
+	rsbscs	r3, r3, #THREAD_SIZE	@ ... and below the top?
+#ifdef CONFIG_VMAP_STACK
+	ldr_va	r3, high_memory, cc	@ End of the linear region
+	cmpcc	r3, r1			@ Stack pointer was below it?
 #endif
-9997:
+	bcc	0f			@ If not, switch to the IRQ stack
+	mov	r0, r1
+	bl	generic_handle_arch_irq
+	b	1f
+0:
+	.endif
+
+	mov_l	r0, generic_handle_arch_irq
+	bl	call_with_stack
+1:
 	.endm
 
 	.macro	pabt_helper
 	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
 #ifdef MULTI_PABORT
-	ldr	ip, .LCprocfns
-	mov	lr, pc
-	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
+	ldr_va	ip, processor, offset=PROCESSOR_PABT_FUNC
+	bl_r	ip
 #else
 	bl	CPU_PABORT_HANDLER
 #endif
@@ -71,9 +81,8 @@
 	@ the fault status register in r1.  r9 must be preserved.
 	@
 #ifdef MULTI_DABORT
-	ldr	ip, .LCprocfns
-	mov	lr, pc
-	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
+	ldr_va	ip, processor, offset=PROCESSOR_DABT_FUNC
+	bl_r	ip
 #else
 	bl	CPU_DABORT_HANDLER
 #endif
@@ -142,27 +151,35 @@ ENDPROC(__und_invalid)
 #define SPFIX(code...)
 #endif
 
-	.macro	svc_entry, stack_hole=0, trace=1, uaccess=1
+	.macro	svc_entry, stack_hole=0, trace=1, uaccess=1, overflow_check=1
 UNWIND(.fnstart		)
-UNWIND(.save {r0 - pc}		)
-	sub	sp, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
+	sub	sp, sp, #(SVC_REGS_SIZE + \stack_hole)
+THUMB(	add	sp, r1		)	@ get SP in a GPR without
+THUMB(	sub	r1, sp, r1	)	@ using a temp register
+
+	.if	\overflow_check
+UNWIND(.save	{r0 - pc}	)
+	do_overflow_check (SVC_REGS_SIZE + \stack_hole)
+	.endif
+
 #ifdef CONFIG_THUMB2_KERNEL
-SPFIX(	str	r0, [sp]	)	@ temporarily saved
-SPFIX(	mov	r0, sp		)
-SPFIX(	tst	r0, #4		)	@ test original stack alignment
-SPFIX(	ldr	r0, [sp]	)	@ restored
+	tst	r1, #4			@ test stack pointer alignment
+	sub	r1, sp, r1		@ restore original R1
+	sub	sp, r1			@ restore original SP
 #else
 SPFIX(	tst	sp, #4		)
 #endif
-SPFIX(	subeq	sp, sp, #4	)
-	stmia	sp, {r1 - r12}
+SPFIX(	subne	sp, sp, #4	)
+
+ARM(	stmib	sp, {r1 - r12}	)
+THUMB(	stmia	sp, {r0 - r12}	)	@ No STMIB in Thumb-2
 
 	ldmia	r0, {r3 - r5}
-	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
+	add	r7, sp, #S_SP		@ here for interlock avoidance
 	mov	r6, #-1			@  ""  ""      ""       ""
-	add	r2, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
-SPFIX(	addeq	r2, r2, #4	)
-	str	r3, [sp, #-4]!		@ save the "real" r0 copied
+	add	r2, sp, #(SVC_REGS_SIZE + \stack_hole)
+SPFIX(	addne	r2, r2, #4	)
+	str	r3, [sp]		@ save the "real" r0 copied
 					@ from the exception stack
 
 	mov	r3, lr
@@ -179,15 +196,7 @@ ENDPROC(__und_invalid)
 	stmia	r7, {r2 - r6}
 
 	get_thread_info tsk
-	ldr	r0, [tsk, #TI_ADDR_LIMIT]
-	mov	r1, #TASK_SIZE
-	str	r1, [tsk, #TI_ADDR_LIMIT]
-	str	r0, [sp, #SVC_ADDR_LIMIT]
-
-	uaccess_save r0
-	.if \uaccess
-	uaccess_disable r0
-	.endif
+	uaccess_entry tsk, r0, r1, r2, \uaccess
 
 	.if \trace
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -209,9 +218,9 @@ ENDPROC(__dabt_svc)
 	.align	5
 __irq_svc:
 	svc_entry
-	irq_handler
+	irq_handler from_user=0
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
 	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
 	teq	r8, #0				@ if preempt count != 0
@@ -226,7 +235,7 @@ ENDPROC(__irq_svc)
 
 	.ltorg
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
svc_preempt:
 	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
@@ -259,31 +268,10 @@ __und_svc:
 #else
 	svc_entry
 #endif
-	@
-	@ call emulation code, which returns using r9 if it has emulated
-	@ the instruction, or the more conventional lr if we are to treat
-	@ this as a real undefined instruction
-	@
-	@ r0 - instruction
-	@
-#ifndef CONFIG_THUMB2_KERNEL
-	ldr	r0, [r4, #-4]
-#else
-	mov	r1, #2
-	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
-	cmp	r0, #0xe800			@ 32-bit instruction if xx >= 0
-	blo	__und_svc_fault
-	ldrh	r9, [r4]			@ bottom 16 bits
-	add	r4, r4, #2
-	str	r4, [sp, #S_PC]
-	orr	r0, r9, r0, lsl #16
-#endif
-	badr	r9, __und_svc_finish
-	mov	r2, r4
-	bl	call_fpe
 
 	mov	r1, #4				@ PC correction to apply
-__und_svc_fault:
+THUMB(	tst	r5, #PSR_T_BIT	)		@ exception taken in Thumb mode?
+THUMB(	movne	r1, #2		)		@ if so, fix up PC correction
 	mov	r0, sp				@ struct pt_regs *regs
 	bl	__und_fault
@@ -312,16 +300,6 @@ __fiq_svc:
UNWIND(.fnend		)
ENDPROC(__fiq_svc)
 
-	.align	5
-.LCcralign:
-	.word	cr_alignment
-#ifdef MULTI_DABORT
-.LCprocfns:
-	.word	processor
-#endif
-.LCfp:
-	.word	fp_enter
-
 /*
 * Abort mode handlers
 */
@@ -380,7 +358,7 @@ ENDPROC(__fiq_abt)
THUMB(	stmia	sp, {r0 - r12}	)
 
ATRAP(	mrc	p15, 0, r7, c1, c0, 0)
-ATRAP(	ldr	r8, .LCcralign)
+ATRAP(	ldr_va	r8, cr_alignment)
 
 	ldmia	r0, {r3 - r5}
 	add	r0, sp, #S_PC		@ here for interlock avoidance
@@ -389,8 +367,6 @@ ENDPROC(__fiq_abt)
 	str	r3, [sp]		@ save the "real" r0 copied
 					@ from the exception stack
 
-ATRAP(	ldr	r8, [r8, #0])
-
 	@
 	@ We are now ready to fill in the remaining blanks on the stack:
 	@
@@ -412,6 +388,8 @@ ENDPROC(__fiq_abt)
ATRAP(	teq	r8, r7)
ATRAP(	mcrne	p15, 0, r8, c1, c0, 0)
 
+	reload_current r7, r8
+
 	@
 	@ Clear FP to mark the first stack frame
 	@
@@ -434,7 +412,8 @@
 	@ if it was interrupted in a critical region.  Here we
 	@ perform a quick test inline since it should be false
 	@ 99.9999% of the time.  The rest is done out of line.
-	cmp	r4, #TASK_SIZE
+	ldr	r0, =TASK_SIZE
+	cmp	r4, r0
 	blhs	kuser_cmpxchg64_fixup
 #endif
 #endif
@@ -454,7 +433,7 @@ ENDPROC(__dabt_usr)
__irq_usr:
 	usr_entry
 	kuser_cmpxchg_check
-	irq_handler
+	irq_handler from_user=1
 	get_thread_info tsk
 	mov	why, #0
 	b	ret_to_user_from_irq
@@ -467,275 +446,26 @@ ENDPROC(__irq_usr)
__und_usr:
 	usr_entry uaccess=0
 
-	mov	r2, r4
-	mov	r3, r5
-
-	@ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
-	@      faulting instruction depending on Thumb mode.
-	@ r3 = regs->ARM_cpsr
-	@
-	@ The emulation code returns using r9 if it has emulated the
-	@ instruction, or the more conventional lr if we are to treat
-	@ this as a real undefined instruction
-	@
-	badr	r9, ret_from_exception
-
 	@ IRQs must be enabled before attempting to read the instruction from
 	@ user space since that could cause a page/translation fault if the
 	@ page table was modified by another CPU.
 	enable_irq
 
-	tst	r3, #PSR_T_BIT			@ Thumb mode?
-	bne	__und_usr_thumb
-	sub	r4, r2, #4			@ ARM instr at LR - 4
-1:	ldrt	r0, [r4]
-ARM_BE8(rev	r0, r0)				@ little endian instruction
-
-	uaccess_disable ip
-
-	@ r0 = 32-bit ARM instruction which caused the exception
-	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
-	@ r4 = PC value for the faulting instruction
-	@ lr = 32-bit undefined instruction function
-	badr	lr, __und_usr_fault_32
-	b	call_fpe
-
-__und_usr_thumb:
-	@ Thumb instruction
-	sub	r4, r2, #2			@ First half of thumb instr at LR - 2
-#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
-/*
- * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
- * can never be supported in a single kernel, this code is not applicable at
- * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
- * made about .arch directives.
- */
-#if __LINUX_ARM_ARCH__ < 7
-/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
-#define NEED_CPU_ARCHITECTURE
-	ldr	r5, .LCcpu_architecture
-	ldr	r5, [r5]
-	cmp	r5, #CPU_ARCH_ARMv7
-	blo	__und_usr_fault_16		@ 16bit undefined instruction
-/*
- * The following code won't get run unless the running CPU really is v7, so
- * coding round the lack of ldrht on older arches is pointless.  Temporarily
- * override the assembler target arch with the minimum required instead:
- */
-	.arch	armv6t2
+	tst	r5, #PSR_T_BIT			@ Thumb mode?
+	mov	r1, #2				@ set insn size to 2 for Thumb
+	bne	0f				@ handle as Thumb undef exception
+#ifdef CONFIG_FPE_NWFPE
+	adr	r9, ret_from_exception
+	bl	call_fpe			@ returns via R9 on success
 #endif
-2:	ldrht	r5, [r4]
-ARM_BE8(rev16	r5, r5)				@ little endian instruction
-	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
-	blo	__und_usr_fault_16_pan		@ 16bit undefined instruction
-3:	ldrht	r0, [r2]
-ARM_BE8(rev16	r0, r0)				@ little endian instruction
+	mov	r1, #4				@ set insn size to 4 for ARM
+0:	mov	r0, sp
 	uaccess_disable ip
-	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
-	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
-	orr	r0, r0, r5, lsl #16
-	badr	lr, __und_usr_fault_32
-	@ r0 = the two 16-bit Thumb instructions which caused the exception
-	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
-	@ r4 = PC value for the first 16-bit Thumb instruction
-	@ lr = 32bit undefined instruction function
-
-#if __LINUX_ARM_ARCH__ < 7
-/* If the target arch was overridden, change it back: */
-#ifdef CONFIG_CPU_32v6K
-	.arch	armv6k
-#else
-	.arch	armv6
-#endif
-#endif /* __LINUX_ARM_ARCH__ < 7 */
-#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
-	b	__und_usr_fault_16
-#endif
+	bl	__und_fault
+	b	ret_from_exception
UNWIND(.fnend)
ENDPROC(__und_usr)
 
-/*
- * The out of line fixup for the ldrt instructions above.
- */
-	.pushsection .text.fixup, "ax"
-	.align	2
-4:	str	r4, [sp, #S_PC]			@ retry current instruction
-	ret	r9
-	.popsection
-	.pushsection __ex_table,"a"
-	.long	1b, 4b
-#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
-	.long	2b, 4b
-	.long	3b, 4b
-#endif
-	.popsection
-
-/*
- * Check whether the instruction is a co-processor instruction.
- * If yes, we need to call the relevant co-processor handler.
- *
- * Note that we don't do a full check here for the co-processor
- * instructions; all instructions with bit 27 set are well
- * defined.  The only instructions that should fault are the
- * co-processor instructions.  However, we have to watch out
- * for the ARM6/ARM7 SWI bug.
- *
- * NEON is a special case that has to be handled here. Not all
- * NEON instructions are co-processor instructions, so we have
- * to make a special case of checking for them. Plus, there's
- * five groups of them, so we have a table of mask/opcode pairs
- * to check against, and if any match then we branch off into the
- * NEON handler code.
- *
- * Emulators may wish to make use of the following registers:
- *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
- *  r2  = PC value to resume execution after successful emulation
- *  r9  = normal "successful" return address
- *  r10 = this threads thread_info structure
- *  lr  = unrecognised instruction return address
- * IRQs enabled, FIQs enabled.
- */
-	@
-	@ Fall-through from Thumb-2 __und_usr
-	@
-#ifdef CONFIG_NEON
-	get_thread_info r10			@ get current thread
-	adr	r6, .LCneon_thumb_opcodes
-	b	2f
-#endif
-call_fpe:
-	get_thread_info r10			@ get current thread
-#ifdef CONFIG_NEON
-	adr	r6, .LCneon_arm_opcodes
-2:	ldr	r5, [r6], #4			@ mask value
-	ldr	r7, [r6], #4			@ opcode bits matching in mask
-	cmp	r5, #0				@ end mask?
-	beq	1f
-	and	r8, r0, r5
-	cmp	r8, r7				@ NEON instruction?
-	bne	2b
-	mov	r7, #1
-	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
-	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
-	b	do_vfp				@ let VFP handler handle this
-1:
-#endif
-	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
-	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
-	reteq	lr
-	and	r8, r0, #0x00000f00		@ mask out CP number
-THUMB(	lsr	r8, r8, #8		)
-	mov	r7, #1
-	add	r6, r10, #TI_USED_CP
-ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
-THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
-#ifdef CONFIG_IWMMXT
-	@ Test if we need to give access to iWMMXt coprocessors
-	ldr	r5, [r10, #TI_FLAGS]
-	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
-	movscs	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
-	bcs	iwmmxt_task_enable
-#endif
-ARM(	add	pc, pc, r8, lsr #6	)
-THUMB(	lsl	r8, r8, #2		)
-THUMB(	add	pc, r8			)
-	nop
-
-	ret.w	lr				@ CP#0
-	W(b)	do_fpe				@ CP#1 (FPE)
-	W(b)	do_fpe				@ CP#2 (FPE)
-	ret.w	lr				@ CP#3
-#ifdef CONFIG_CRUNCH
-	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
-	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
-	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
-#else
-	ret.w	lr				@ CP#4
-	ret.w	lr				@ CP#5
-	ret.w	lr				@ CP#6
-#endif
-	ret.w	lr				@ CP#7
-	ret.w	lr				@ CP#8
-	ret.w	lr				@ CP#9
-#ifdef CONFIG_VFP
-	W(b)	do_vfp				@ CP#10 (VFP)
-	W(b)	do_vfp				@ CP#11 (VFP)
-#else
-	ret.w	lr				@ CP#10 (VFP)
-	ret.w	lr				@ CP#11 (VFP)
-#endif
-	ret.w	lr				@ CP#12
-	ret.w	lr				@ CP#13
-	ret.w	lr				@ CP#14 (Debug)
-	ret.w	lr				@ CP#15 (Control)
-
-#ifdef NEED_CPU_ARCHITECTURE
-	.align	2
-.LCcpu_architecture:
-	.word	__cpu_architecture
-#endif
-
-#ifdef CONFIG_NEON
-	.align	6
-
-.LCneon_arm_opcodes:
-	.word	0xfe000000			@ mask
-	.word	0xf2000000			@ opcode
-
-	.word	0xff100000			@ mask
-	.word	0xf4000000			@ opcode
-
-	.word	0x00000000			@ mask
-	.word	0x00000000			@ opcode
-
-.LCneon_thumb_opcodes:
-	.word	0xef000000			@ mask
-	.word	0xef000000			@ opcode
-
-	.word	0xff100000			@ mask
-	.word	0xf9000000			@ opcode
-
-	.word	0x00000000			@ mask
-	.word	0x00000000			@ opcode
-#endif
-
-do_fpe:
-	ldr	r4, .LCfp
-	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
-	ldr	pc, [r4]			@ Call FP module USR entry point
-
-/*
- * The FP module is called with these registers set:
- *  r0  = instruction
- *  r2  = PC+4
- *  r9  = normal "successful" return address
- *  r10 = FP workspace
- *  lr  = unrecognised FP instruction return address
- */
-
-	.pushsection .data
-	.align	2
-ENTRY(fp_enter)
-	.word	no_fp
-	.popsection
-
-ENTRY(no_fp)
-	ret	lr
-ENDPROC(no_fp)
-
-__und_usr_fault_32:
-	mov	r1, #4
-	b	1f
-__und_usr_fault_16_pan:
-	uaccess_disable ip
-__und_usr_fault_16:
-	mov	r1, #2
-1:	mov	r0, sp
-	badr	lr, ret_from_exception
-	b	__und_fault
-ENDPROC(__und_usr_fault_32)
-ENDPROC(__und_usr_fault_16)
 
 	.align	5
__pabt_usr:
 	usr_entry
@@ -788,14 +518,17 @@ ENTRY(__switch_to)
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
 #endif
 	switch_tls r1, r4, r5, r3, r7
-#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
-	ldr	r7, [r2, #TI_TASK]
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
+    !defined(CONFIG_STACKPROTECTOR_PER_TASK)
 	ldr	r8, =__stack_chk_guard
 	.if (TSK_STACK_CANARY > IMM12_MASK)
-	add	r7, r7, #TSK_STACK_CANARY & ~IMM12_MASK
+	add	r9, r2, #TSK_STACK_CANARY & ~IMM12_MASK
+	ldr	r9, [r9, #TSK_STACK_CANARY & IMM12_MASK]
+	.else
+	ldr	r9, [r2, #TSK_STACK_CANARY & IMM12_MASK]
 	.endif
-	ldr	r7, [r7, #TSK_STACK_CANARY & IMM12_MASK]
 #endif
+	mov	r7, r2				@ Preserve 'next'
 #ifdef CONFIG_CPU_USE_DOMAINS
 	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
 #endif
@@ -804,18 +537,102 @@ ENTRY(__switch_to)
 	ldr	r0, =thread_notify_head
 	mov	r1, #THREAD_NOTIFY_SWITCH
 	bl	atomic_notifier_call_chain
-#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
-	str	r7, [r8]
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
+    !defined(CONFIG_STACKPROTECTOR_PER_TASK)
+	str	r9, [r8]
 #endif
-THUMB(	mov	ip, r4			)
 	mov	r0, r5
-ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
-THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
-THUMB(	ldr	sp, [ip], #4		   )
-THUMB(	ldr	pc, [ip]		   )
+#if !defined(CONFIG_THUMB2_KERNEL) && !defined(CONFIG_VMAP_STACK)
+	set_current r7, r8
+	ldmia	r4, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
+#else
+	mov	r1, r7
+	ldmia	r4, {r4 - sl, fp, ip, lr}	@ Load all regs saved previously
+#ifdef CONFIG_VMAP_STACK
+	@
+	@ Do a dummy read from the new stack while running from the old one so
+	@ that we can rely on do_translation_fault() to fix up any stale PMD
+	@ entries covering the vmalloc region.
+	@
+	ldr	r2, [ip]
+#endif
+
+	@ When CONFIG_THREAD_INFO_IN_TASK=n, the update of SP itself is what
+	@ effectuates the task switch, as that is what causes the observable
+	@ values of current and current_thread_info to change. When
+	@ CONFIG_THREAD_INFO_IN_TASK=y, setting current (and therefore
+	@ current_thread_info) is done explicitly, and the update of SP just
+	@ switches us to another stack, with few other side effects. In order
+	@ to prevent this distinction from causing any inconsistencies, let's
+	@ keep the 'set_current' call as close as we can to the update of SP.
+	set_current r1, r2
+	mov	sp, ip
+	ret	lr
+#endif
UNWIND(.fnend		)
ENDPROC(__switch_to)
 
+#ifdef CONFIG_VMAP_STACK
+	.text
+	.align	2
+__bad_stack:
+	@
+	@ We've just detected an overflow. We need to load the address of this
+	@ CPU's overflow stack into the stack pointer register. We have only one
+	@ scratch register so let's use a sequence of ADDs including one
+	@ involving the PC, and decorate them with PC-relative group
+	@ relocations. As these are ARM only, switch to ARM mode first.
+	@
+	@ We enter here with IP clobbered and its value stashed on the mode
+	@ stack.
+	@
+THUMB(	bx	pc		)
+THUMB(	nop			)
+THUMB(	.arm			)
+	ldr_this_cpu_armv6 ip, overflow_stack_ptr
+
+	str	sp, [ip, #-4]!			@ Preserve original SP value
+	mov	sp, ip				@ Switch to overflow stack
+	pop	{ip}				@ Original SP in IP
+
+#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
+	mov	ip, ip				@ mov expected by unwinder
+	push	{fp, ip, lr, pc}		@ GCC flavor frame record
+#else
+	str	ip, [sp, #-8]!			@ store original SP
+	push	{fpreg, lr}			@ Clang flavor frame record
+#endif
+UNWIND( ldr	ip, [r0, #4]	)		@ load exception LR
+UNWIND( str	ip, [sp, #12]	)		@ store in the frame record
+	ldr	ip, [r0, #12]			@ reload IP
+
+	@ Store the original GPRs to the new stack.
+	svc_entry uaccess=0, overflow_check=0
+
+UNWIND( .save	{sp, pc}	)
+UNWIND( .save	{fpreg, lr}	)
+UNWIND( .setfp	fpreg, sp	)
+
+	ldr	fpreg, [sp, #S_SP]		@ Add our frame record
+						@ to the linked list
+#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
+	ldr	r1, [fp, #4]			@ reload SP at entry
+	add	fp, fp, #12
+#else
+	ldr	r1, [fpreg, #8]
+#endif
+	str	r1, [sp, #S_SP]			@ store in pt_regs
+
+	@ Stash the regs for handle_bad_stack
+	mov	r0, sp
+
+	@ Time to die
+	bl	handle_bad_stack
+	nop
+UNWIND( .fnend			)
+ENDPROC(__bad_stack)
+#endif
+
 	__INIT
 
/*
@@ -826,7 +643,7 @@ ENDPROC(__switch_to)
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
- * See Documentation/arm/kernel_user_helpers.rst for formal definitions.
+ * See Documentation/arch/arm/kernel_user_helpers.rst for formal definitions.
 */
 
THUMB(	.arm	)
@@ -1029,17 +846,23 @@ __kuser_helper_end:
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+vector_bhb_bpiall_\name:
+	mcr	p15, 0, r0, c7, c5, 6	@ BPIALL
+	@ isb not needed due to "movs pc, lr" in the vector stub
+	@ which gives a "context synchronisation".
+#endif
 
vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif
 
-	@
-	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
-	@ (parent CPSR)
-	@
+	@ Save r0, lr_<exception> (parent PC)
	stmia	sp, {r0, lr}		@ save r0, lr
+
+	@ Save spsr_<exception> (parent CPSR)
+.Lvec_\name:
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr
@@ -1061,14 +884,47 @@ vector_\name:
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)
 
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+	.subsection 1
+	.align 5
+vector_bhb_loop8_\name:
+	.if \correction
+	sub	lr, lr, #\correction
+	.endif
+
+	@ Save r0, lr_<exception> (parent PC)
+	stmia	sp, {r0, lr}
+
+	@ bhb workaround
+	mov	r0, #8
+3:	W(b)	. + 4
+	subs	r0, r0, #1
+	bne	3b
+	dsb	nsh
+	@ isb not needed due to "movs pc, lr" in the vector stub
+	@ which gives a "context synchronisation".
+	b	.Lvec_\name
+ENDPROC(vector_bhb_loop8_\name)
+	.previous
+#endif
+
	.align	2
	@ handler addresses follow this label
1:
	.endm
 
	.section .stubs, "ax", %progbits
-	@ This must be the first word
+	@ These need to remain at the start of the section so that
+	@ they are in range of the 'SWI' entries in the vector tables
+	@ located 4k down.
+.L__vector_swi:
	.word	vector_swi
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+.L__vector_bhb_loop8_swi:
+	.word	vector_bhb_loop8_swi
+.L__vector_bhb_bpiall_swi:
+	.word	vector_bhb_bpiall_swi
+#endif
 
vector_rst:
ARM(	swi	SYS_ERROR0	)
@@ -1183,8 +1039,10 @@ vector_addrexcptn:
 * FIQ "NMI" handler
 *-----------------------------------------------------------------------------
 * Handle a FIQ using the SVC stack allowing FIQ act like NMI on x86
- * systems.
+ * systems. This must be the last vector stub, so lets place it in its own
+ * subsection.
 */
+	.subsection 2
	vector_stub	fiq, FIQ_MODE, 4
 
	.long	__fiq_usr			@  0  (USR_26 / USR_32)
@@ -1207,16 +1065,43 @@ vector_addrexcptn:
	.globl	vector_fiq
 
	.section .vectors, "ax", %progbits
-.L__vectors_start:
	W(b)	vector_rst
	W(b)	vector_und
-	W(ldr)	pc, .L__vectors_start + 0x1000
+ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_swi		)
+THUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_swi		)
+	W(ldr)	pc, .
	W(b)	vector_pabt
	W(b)	vector_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_irq
	W(b)	vector_fiq
+
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+	.section .vectors.bhb.loop8, "ax", %progbits
+	W(b)	vector_rst
+	W(b)	vector_bhb_loop8_und
+ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_bhb_loop8_swi	)
+THUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_bhb_loop8_swi	)
+	W(ldr)	pc, .
+	W(b)	vector_bhb_loop8_pabt
+	W(b)	vector_bhb_loop8_dabt
+	W(b)	vector_addrexcptn
+	W(b)	vector_bhb_loop8_irq
+	W(b)	vector_bhb_loop8_fiq
+
+	.section .vectors.bhb.bpiall, "ax", %progbits
+	W(b)	vector_rst
+	W(b)	vector_bhb_bpiall_und
+ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_bhb_bpiall_swi	)
+THUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_bhb_bpiall_swi	)
+	W(ldr)	pc, .
+	W(b)	vector_bhb_bpiall_pabt
+	W(b)	vector_bhb_bpiall_dabt
+	W(b)	vector_addrexcptn
+	W(b)	vector_bhb_bpiall_irq
+	W(b)	vector_bhb_bpiall_fiq
+#endif
 
	.data
	.align	2
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 271cb8a1eba1..5c31e9de7a60 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -9,24 +9,17 @@
 #include <asm/unistd.h>
 #include <asm/ftrace.h>
 #include <asm/unwind.h>
-#include <asm/memory.h>
+#include <asm/page.h>
 #ifdef CONFIG_AEABI
 #include <asm/unistd-oabi.h>
 #endif
 
	.equ	NR_syscalls, __NR_syscalls
 
-#ifdef CONFIG_NEED_RET_TO_USER
-#include <mach/entry-macro.S>
-#else
-	.macro	arch_ret_to_user, tmp1, tmp2
-	.endm
-#endif
-
 #include "entry-header.S"
 
saved_psr	.req	r8
-#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
+#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING_USER)
saved_pc	.req	r9
 #define TRACE(x...) x
 #else
@@ -36,7 +29,7 @@ saved_pc	.req	lr
 
	.section .entry.text,"ax",%progbits
	.align	5
-#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING) || \
+#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING_USER) || \
	IS_ENABLED(CONFIG_DEBUG_RSEQ))
/*
 * This is the fast syscall return path.  We do as little as possible here,
@@ -49,17 +42,10 @@ __ret_fast_syscall:
UNWIND(.fnstart	)
UNWIND(.cantunwind	)
	disable_irq_notrace			@ disable interrupts
-	ldr	r2, [tsk, #TI_ADDR_LIMIT]
-	cmp	r2, #TASK_SIZE
-	blne	addr_limit_check_failed
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
-	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+	movs	r1, r1, lsl #16
	bne	fast_work_pending
-
-	/* perform architecture specific actions before user return */
-	arch_ret_to_user r1, lr
-
	restore_user_regs fast = 1, offset = S_OFF
UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)
@@ -86,11 +72,8 @@ __ret_fast_syscall:
	bl	do_rseq_syscall
 #endif
	disable_irq_notrace			@ disable interrupts
-	ldr	r2, [tsk, #TI_ADDR_LIMIT]
-	cmp	r2, #TASK_SIZE
-	blne	addr_limit_check_failed
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
-	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+	movs	r1, r1, lsl #16
	beq	no_work_pending
UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)
@@ -107,6 +90,7 @@ slow_work_pending:
	cmp	r0, #0
	beq	no_work_pending
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
+	str	scno, [tsk, #TI_ABI_SYSCALL]	@ make sure tracers see update
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go
and off we go ENDPROC(ret_fast_syscall) @@ -127,17 +111,12 @@ ret_slow_syscall: #endif disable_irq_notrace @ disable interrupts ENTRY(ret_to_user_from_irq) - ldr r2, [tsk, #TI_ADDR_LIMIT] - cmp r2, #TASK_SIZE - blne addr_limit_check_failed ldr r1, [tsk, #TI_FLAGS] - tst r1, #_TIF_WORK_MASK + movs r1, r1, lsl #16 bne slow_work_pending no_work_pending: asm_trace_hardirqs_on save = 0 - /* perform architecture specific actions before user return */ - arch_ret_to_user r1, lr ct_user_enter save = 0 restore_user_regs fast = 0, offset = 0 @@ -163,12 +142,36 @@ ENDPROC(ret_from_fork) */ .align 5 +#ifdef CONFIG_HARDEN_BRANCH_HISTORY +ENTRY(vector_bhb_loop8_swi) + sub sp, sp, #PT_REGS_SIZE + stmia sp, {r0 - r12} + mov r8, #8 +1: b 2f +2: subs r8, r8, #1 + bne 1b + dsb nsh + isb + b 3f +ENDPROC(vector_bhb_loop8_swi) + + .align 5 +ENTRY(vector_bhb_bpiall_swi) + sub sp, sp, #PT_REGS_SIZE + stmia sp, {r0 - r12} + mcr p15, 0, r8, c7, c5, 6 @ BPIALL + isb + b 3f +ENDPROC(vector_bhb_bpiall_swi) +#endif + .align 5 ENTRY(vector_swi) #ifdef CONFIG_CPU_V7M v7m_exception_entry #else sub sp, sp, #PT_REGS_SIZE stmia sp, {r0 - r12} @ Calling r0 - r12 +3: ARM( add r8, sp, #S_PC ) ARM( stmdb r8, {sp, lr}^ ) @ Calling sp, lr THUMB( mov r8, sp ) @@ -179,8 +182,9 @@ ENTRY(vector_swi) str saved_psr, [sp, #S_PSR] @ Save CPSR str r0, [sp, #S_OLD_R0] @ Save OLD_R0 #endif + reload_current r10, ip zero_fp - alignment_trap r10, ip, __cr_alignment + alignment_trap r10, ip, cr_alignment asm_trace_hardirqs_on save=0 enable_irq_notrace ct_user_exit save=0 @@ -223,6 +227,7 @@ ENTRY(vector_swi) /* saved_psr and saved_pc are now dead */ uaccess_disable tbl + get_thread_info tsk adr tbl, sys_call_table @ load syscall table pointer @@ -234,13 +239,17 @@ ENTRY(vector_swi) * get the old ABI syscall table address. */ bics r10, r10, #0xff000000 + strne r10, [tsk, #TI_ABI_SYSCALL] + streq scno, [tsk, #TI_ABI_SYSCALL] eorne scno, r10, #__NR_OABI_SYSCALL_BASE ldrne tbl, =sys_oabi_call_table #elif !defined(CONFIG_AEABI) bic scno, scno, #0xff000000 @ mask off SWI op-code + str scno, [tsk, #TI_ABI_SYSCALL] eor scno, scno, #__NR_SYSCALL_BASE @ check OS number +#else + str scno, [tsk, #TI_ABI_SYSCALL] #endif - get_thread_info tsk /* * Reload the registers that may have been corrupted on entry to * the syscall assembly (by tracing or context tracking.) @@ -279,13 +288,13 @@ local_restart: b ret_fast_syscall #endif ENDPROC(vector_swi) + .ltorg /* * This is the really slow path. We're going to be doing * context switches, and waiting for our parent to respond. */ __sys_trace: - mov r1, scno add r0, sp, #S_OFF bl syscall_trace_enter mov scno, r0 @@ -306,14 +315,6 @@ __sys_trace_return: bl syscall_trace_exit b ret_slow_syscall - .align 5 -#ifdef CONFIG_ALIGNMENT_TRAP - .type __cr_alignment, #object -__cr_alignment: - .word cr_alignment -#endif - .ltorg - .macro syscall_table_start, sym .equ __sys_nr, 0 .type \sym, #object @@ -341,20 +342,19 @@ ENTRY(\sym) .size \sym, . - \sym .endm -#define NATIVE(nr, func) syscall nr, func +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) +#define __SYSCALL(nr, func) syscall nr, func /* * This is the syscall table declaration for native ABI syscalls. * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall. 
*/ syscall_table_start sys_call_table -#define COMPAT(nr, native, compat) syscall nr, native #ifdef CONFIG_AEABI #include <calls-eabi.S> #else #include <calls-oabi.S> #endif -#undef COMPAT syscall_table_end sys_call_table /*============================================================================ @@ -452,7 +452,8 @@ ENDPROC(sys_oabi_readahead) * using the compatibility syscall entries. */ syscall_table_start sys_oabi_call_table -#define COMPAT(nr, native, compat) syscall nr, compat +#undef __SYSCALL_WITH_COMPAT +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat) #include <calls-oabi.S> syscall_table_end sys_oabi_call_table diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S index a74289ebc803..3e7bcaca5e07 100644 --- a/arch/arm/kernel/entry-ftrace.S +++ b/arch/arm/kernel/entry-ftrace.S @@ -22,12 +22,9 @@ * mcount can be thought of as a function called in the middle of a subroutine * call. As such, it needs to be transparent for both the caller and the * callee: the original lr needs to be restored when leaving mcount, and no - * registers should be clobbered. (In the __gnu_mcount_nc implementation, we - * clobber the ip register. This is OK because the ARM calling convention - * allows it to be clobbered in subroutines and doesn't use it to hold - * parameters.) + * registers should be clobbered. * - * When using dynamic ftrace, we patch out the mcount call by a "pop {lr}" + * When using dynamic ftrace, we patch out the mcount call by a "add sp, #4" * instead of the __gnu_mcount_nc call (see arch/arm/kernel/ftrace.c). */ @@ -38,23 +35,20 @@ .macro __mcount suffix mcount_enter - ldr r0, =ftrace_trace_function - ldr r2, [r0] - adr r0, .Lftrace_stub + ldr_va r2, ftrace_trace_function + badr r0, .Lftrace_stub cmp r0, r2 bne 1f #ifdef CONFIG_FUNCTION_GRAPH_TRACER - ldr r1, =ftrace_graph_return - ldr r2, [r1] - cmp r0, r2 - bne ftrace_graph_caller\suffix - - ldr r1, =ftrace_graph_entry - ldr r2, [r1] - ldr r0, =ftrace_graph_entry_stub - cmp r0, r2 - bne ftrace_graph_caller\suffix + ldr_va r2, ftrace_graph_return + cmp r0, r2 + bne ftrace_graph_caller\suffix + + ldr_va r2, ftrace_graph_entry + mov_l r0, ftrace_graph_entry_stub + cmp r0, r2 + bne ftrace_graph_caller\suffix #endif mcount_exit @@ -70,29 +64,27 @@ .macro __ftrace_regs_caller - sub sp, sp, #8 @ space for PC and CPSR OLD_R0, + str lr, [sp, #-8]! @ store LR as PC and make space for CPSR/OLD_R0, @ OLD_R0 will overwrite previous LR - add ip, sp, #12 @ move in IP the value of SP as it was - @ before the push {lr} of the mcount mechanism + ldr lr, [sp, #8] @ get previous LR - str lr, [sp, #0] @ store LR instead of PC + str r0, [sp, #8] @ write r0 as OLD_R0 over previous LR - ldr lr, [sp, #8] @ get previous LR + str lr, [sp, #-4]! @ store previous LR as LR - str r0, [sp, #8] @ write r0 as OLD_R0 over previous LR + add lr, sp, #16 @ move in LR the value of SP as it was + @ before the push {lr} of the mcount mechanism - stmdb sp!, {ip, lr} - stmdb sp!, {r0-r11, lr} + push {r0-r11, ip, lr} @ stack content at this point: @ 0 4 48 52 56 60 64 68 72 - @ R0 | R1 | ... | LR | SP + 4 | previous LR | LR | PSR | OLD_R0 | + @ R0 | R1 | ... 
| IP | SP + 4 | previous LR | LR | PSR | OLD_R0 | - mov r3, sp @ struct pt_regs* + mov r3, sp @ struct pt_regs* - ldr r2, =function_trace_op - ldr r2, [r2] @ pointer to the current + ldr_va r2, function_trace_op @ pointer to the current @ function tracing op ldr r1, [sp, #S_LR] @ lr of instrumented func @@ -108,35 +100,37 @@ ftrace_regs_call: #ifdef CONFIG_FUNCTION_GRAPH_TRACER .globl ftrace_graph_regs_call ftrace_graph_regs_call: - mov r0, r0 +ARM( mov r0, r0 ) +THUMB( nop.w ) #endif @ pop saved regs - ldmia sp!, {r0-r12} @ restore r0 through r12 - ldr ip, [sp, #8] @ restore PC - ldr lr, [sp, #4] @ restore LR - ldr sp, [sp, #0] @ restore SP - mov pc, ip @ return + pop {r0-r11, ip, lr} @ restore r0 through r12 + ldr lr, [sp], #4 @ restore LR + ldr pc, [sp], #12 .endm #ifdef CONFIG_FUNCTION_GRAPH_TRACER .macro __ftrace_graph_regs_caller - sub r0, fp, #4 @ lr of instrumented routine (parent) +#ifdef CONFIG_UNWINDER_FRAME_POINTER + sub r0, fp, #4 @ lr of instrumented routine (parent) +#else + add r0, sp, #S_LR +#endif @ called from __ftrace_regs_caller - ldr r1, [sp, #S_PC] @ instrumented routine (func) + ldr r1, [sp, #S_PC] @ instrumented routine (func) mcount_adjust_addr r1, r1 - mov r2, fp @ frame pointer + mov r2, fpreg @ frame pointer + add r3, sp, #PT_REGS_SIZE bl prepare_ftrace_return @ pop registers saved in ftrace_regs_caller - ldmia sp!, {r0-r12} @ restore r0 through r12 - ldr ip, [sp, #8] @ restore PC - ldr lr, [sp, #4] @ restore LR - ldr sp, [sp, #0] @ restore SP - mov pc, ip @ return + pop {r0-r11, ip, lr} @ restore r0 through r12 + ldr lr, [sp], #4 @ restore LR + ldr pc, [sp], #12 .endm #endif @@ -149,8 +143,7 @@ ftrace_graph_regs_call: mcount_adjust_addr r0, lr @ instrumented function #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS - ldr r2, =function_trace_op - ldr r2, [r2] @ pointer to the current + ldr_va r2, function_trace_op @ pointer to the current @ function tracing op mov r3, #0 @ regs is NULL #endif @@ -162,14 +155,19 @@ ftrace_call\suffix: #ifdef CONFIG_FUNCTION_GRAPH_TRACER .globl ftrace_graph_call\suffix ftrace_graph_call\suffix: - mov r0, r0 +ARM( mov r0, r0 ) +THUMB( nop.w ) #endif mcount_exit .endm .macro __ftrace_graph_caller +#ifdef CONFIG_UNWINDER_FRAME_POINTER sub r0, fp, #4 @ &lr of instrumented routine (&parent) +#else + add r0, sp, #20 +#endif #ifdef CONFIG_DYNAMIC_FTRACE @ called from __ftrace_caller, saved in mcount_enter ldr r1, [sp, #16] @ instrumented routine (func) @@ -178,7 +176,8 @@ ftrace_graph_call\suffix: @ called from __mcount, untouched in lr mcount_adjust_addr r1, lr @ instrumented routine (func) #endif - mov r2, fp @ frame pointer + mov r2, fpreg @ frame pointer + add r3, sp, #24 bl prepare_ftrace_return mcount_exit .endm @@ -202,16 +201,17 @@ ftrace_graph_call\suffix: .endm .macro mcount_exit - ldmia sp!, {r0-r3, ip, lr} - ret ip + ldmia sp!, {r0-r3} + ldr lr, [sp, #4] + ldr pc, [sp], #8 .endm ENTRY(__gnu_mcount_nc) UNWIND(.fnstart) #ifdef CONFIG_DYNAMIC_FTRACE - mov ip, lr - ldmia sp!, {lr} - ret ip + push {lr} + ldr lr, [sp, #4] + ldr pc, [sp], #8 #else __mcount #endif @@ -256,17 +256,33 @@ ENDPROC(ftrace_graph_regs_caller) .purgem mcount_exit #ifdef CONFIG_FUNCTION_GRAPH_TRACER - .globl return_to_handler -return_to_handler: +ENTRY(return_to_handler) stmdb sp!, {r0-r3} - mov r0, fp @ frame pointer + add r0, sp, #16 @ sp at exit of instrumented routine bl ftrace_return_to_handler mov lr, r0 @ r0 has real ret addr ldmia sp!, {r0-r3} ret lr +ENDPROC(return_to_handler) #endif ENTRY(ftrace_stub) .Lftrace_stub: ret lr ENDPROC(ftrace_stub) + +#ifdef 
CONFIG_DYNAMIC_FTRACE + + __INIT + + .macro init_tramp, dst:req +ENTRY(\dst\()_from_init) + ldr pc, =\dst +ENDPROC(\dst\()_from_init) + .endm + + init_tramp ftrace_caller +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + init_tramp ftrace_regs_caller +#endif +#endif diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 32051ec5b33f..99411fa91350 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -6,6 +6,7 @@ #include <asm/asm-offsets.h> #include <asm/errno.h> #include <asm/thread_info.h> +#include <asm/uaccess-asm.h> #include <asm/v7m.h> @ Bad Abort numbers @@ -47,8 +48,7 @@ .macro alignment_trap, rtmp1, rtmp2, label #ifdef CONFIG_ALIGNMENT_TRAP mrc p15, 0, \rtmp2, c1, c0, 0 - ldr \rtmp1, \label - ldr \rtmp1, [\rtmp1] + ldr_va \rtmp1, \label teq \rtmp1, \rtmp2 mcrne p15, 0, \rtmp1, c1, c0, 0 #endif @@ -217,9 +217,7 @@ blne trace_hardirqs_off #endif .endif - ldr r1, [sp, #SVC_ADDR_LIMIT] - uaccess_restore - str r1, [tsk, #TI_ADDR_LIMIT] + uaccess_exit tsk, r0, r1 #ifndef CONFIG_THUMB2_KERNEL @ ARM mode SVC restore @@ -263,9 +261,7 @@ @ on the stack remains correct). @ .macro svc_exit_via_fiq - ldr r1, [sp, #SVC_ADDR_LIMIT] - uaccess_restore - str r1, [tsk, #TI_ADDR_LIMIT] + uaccess_exit tsk, r0, r1 #ifndef CONFIG_THUMB2_KERNEL @ ARM mode restore mov r0, sp @@ -295,6 +291,20 @@ .macro restore_user_regs, fast = 0, offset = 0 +#if defined(CONFIG_CPU_32v6K) && \ + (!defined(CONFIG_CPU_V6) || defined(CONFIG_SMP)) +#ifdef CONFIG_CPU_V6 +ALT_SMP(nop) +ALT_UP_B(.L1_\@) +#endif + @ The TLS register update is deferred until return to user space so we + @ can use it for other things while running in the kernel + mrc p15, 0, r1, c13, c0, 3 @ get current_thread_info pointer + ldr r1, [r1, #TI_TP_VALUE] + mcr p15, 0, r1, c13, c0, 3 @ set TLS register +.L1_\@: +#endif + uaccess_enable r1, isb=0 #ifndef CONFIG_THUMB2_KERNEL @ ARM mode restore @@ -356,25 +366,25 @@ * between user and kernel mode. */ .macro ct_user_exit, save = 1 -#ifdef CONFIG_CONTEXT_TRACKING +#ifdef CONFIG_CONTEXT_TRACKING_USER .if \save stmdb sp!, {r0-r3, ip, lr} - bl context_tracking_user_exit + bl user_exit_callable ldmia sp!, {r0-r3, ip, lr} .else - bl context_tracking_user_exit + bl user_exit_callable .endif #endif .endm .macro ct_user_enter, save = 1 -#ifdef CONFIG_CONTEXT_TRACKING +#ifdef CONFIG_CONTEXT_TRACKING_USER .if \save stmdb sp!, {r0-r3, ip, lr} - bl context_tracking_user_enter + bl user_enter_callable ldmia sp!, {r0-r3, ip, lr} .else - bl context_tracking_user_enter + bl user_enter_callable .endif #endif .endm @@ -418,3 +428,40 @@ scno .req r7 @ syscall number tbl .req r8 @ syscall table pointer why .req r8 @ Linux syscall (!= 0) tsk .req r9 @ current thread_info + + .macro do_overflow_check, frame_size:req +#ifdef CONFIG_VMAP_STACK + @ + @ Test whether the SP has overflowed. Task and IRQ stacks are aligned + @ so that SP & BIT(THREAD_SIZE_ORDER + PAGE_SHIFT) should always be + @ zero. + @ +ARM( tst sp, #1 << (THREAD_SIZE_ORDER + PAGE_SHIFT) ) +THUMB( tst r1, #1 << (THREAD_SIZE_ORDER + PAGE_SHIFT) ) +THUMB( it ne ) + bne .Lstack_overflow_check\@ + + .pushsection .text +.Lstack_overflow_check\@: + @ + @ The stack pointer is not pointing to a valid vmap'ed stack, but it + @ may be pointing into the linear map instead, which may happen if we + @ are already running from the overflow stack. We cannot detect overflow + @ in such cases so just carry on. + @ + str ip, [r0, #12] @ Stash IP on the mode stack + ldr_va ip, high_memory @ Start of VMALLOC space +ARM( cmp sp, ip ) @ SP in vmalloc space? +THUMB( cmp r1, ip ) +THUMB( itt lo ) + ldrlo ip, [r0, #12] @ Restore IP + blo .Lout\@ @ Carry on + +THUMB( sub r1, sp, r1 ) @ Restore original R1 +THUMB( sub sp, r1 ) @ Restore original SP + add sp, sp, #\frame_size @ Undo svc_entry's SP change + b __bad_stack @ Handle VMAP stack overflow + .popsection +.Lout\@: +#endif + .endm
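The do_overflow_check macro above relies purely on alignment: task and IRQ stacks are THREAD_SIZE in size and aligned so that a valid SP always has bit (THREAD_SIZE_ORDER + PAGE_SHIFT) clear, letting a single TST catch an overflow. A standalone sketch of the invariant, assuming the common arm32 values THREAD_SIZE_ORDER == 1 and PAGE_SHIFT == 12 (8 KiB stacks, double-size alignment); the constants are assumptions for illustration:

	#include <assert.h>

	#define PAGE_SHIFT        12
	#define THREAD_SIZE_ORDER 1
	#define THREAD_SIZE   (1UL << (THREAD_SIZE_ORDER + PAGE_SHIFT)) /* 8 KiB */
	#define OVERFLOW_BIT  (1UL << (THREAD_SIZE_ORDER + PAGE_SHIFT))

	int main(void)
	{
		unsigned long base = 0x10000; /* aligned to 2 * THREAD_SIZE */

		/* any SP inside [base, base + THREAD_SIZE) keeps the bit clear */
		for (unsigned long sp = base; sp < base + THREAD_SIZE; sp += 4)
			assert(!(sp & OVERFLOW_BIT));

		/* the first push below the base sets it: overflow detected */
		assert((base - 4) & OVERFLOW_BIT);
		return 0;
	}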
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S index de1f20624be1..52bacf07ba16 100644 --- a/arch/arm/kernel/entry-v7m.S +++ b/arch/arm/kernel/entry-v7m.S @@ -6,7 +6,7 @@ * * Low-level vector interface routines for the ARMv7-M architecture */ -#include <asm/memory.h> +#include <asm/page.h> #include <asm/glue.h> #include <asm/thread_notify.h> #include <asm/v7m.h> @@ -23,7 +23,7 @@ __invalid_entry: adr r0, strerr mrs r1, ipsr mov r2, lr - bl printk + bl _printk #endif mov r0, sp bl show_regs @@ -39,16 +39,25 @@ __irq_entry: @ @ Invoke the IRQ handler @ - mrs r0, ipsr - ldr r1, =V7M_xPSR_EXCEPTIONNO - and r0, r1 - sub r0, #16 - mov r1, sp - stmdb sp!, {lr} - @ routine called with r0 = irq number, r1 = struct pt_regs * - bl nvic_handle_irq - - pop {lr} + mov r0, sp + ldr_this_cpu sp, irq_stack_ptr, r1, r2 + + @ + @ If we took the interrupt while running in the kernel, we may already + @ be using the IRQ stack, so revert to the original value in that case. + @ + subs r2, sp, r0 @ SP above bottom of IRQ stack? + rsbscs r2, r2, #THREAD_SIZE @ ... and below the top? + movcs sp, r0 + + push {r0, lr} @ preserve LR and original SP + + @ routine called with r0 = struct pt_regs * + bl generic_handle_arch_irq + + pop {r0, lr} + mov sp, r0 + @ @ Check for any pending work if returning to user @ @@ -59,7 +68,7 @@ __irq_entry: get_thread_info tsk ldr r2, [tsk, #TI_FLAGS] - tst r2, #_TIF_WORK_MASK + movs r2, r2, lsl #16 beq 2f @ no work pending mov r0, #V7M_SCB_ICSR_PENDSVSET str r0, [r1, V7M_SCB_ICSR] @ raise PendSV @@ -101,15 +110,17 @@ ENTRY(__switch_to) str sp, [ip], #4 str lr, [ip], #4 mov r5, r0 + mov r6, r2 @ Preserve 'next' add r4, r2, #TI_CPU_SAVE ldr r0, =thread_notify_head mov r1, #THREAD_NOTIFY_SWITCH bl atomic_notifier_call_chain - mov ip, r4 mov r0, r5 - ldmia ip!, {r4 - r11} @ Load all regs saved previously - ldr sp, [ip] - ldr pc, [ip, #4]! + mov r1, r6 + ldmia r4, {r4 - r12, lr} @ Load all regs saved previously + set_current r1, r2 + mov sp, ip + bx lr .fnend ENDPROC(__switch_to)
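Several return paths in this patch (including the v7m IRQ exit above) replace "tst rN, #_TIF_WORK_MASK" with "movs rN, rN, lsl #16". This works only under the assumption that every work-pending _TIF_ flag lives in the low 16 bits of thread_info::flags: the shift moves exactly those bits into the tested value and sets the condition codes without needing a mask literal. A hedged C rendering of the equivalence, not kernel code:

	#include <assert.h>
	#include <stdint.h>

	/* assumes all work-pending _TIF_ bits occupy bits 0..15 */
	static int work_pending(uint32_t ti_flags)
	{
		/* "movs r1, r1, lsl #16; bne slow_work_pending" in C */
		return (uint32_t)(ti_flags << 16) != 0;
	}

	int main(void)
	{
		assert(work_pending(0x00000001));  /* low (work) flag set      */
		assert(!work_pending(0x00010000)); /* only a high, non-work bit */
		return 0;
	}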
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c index cd1234c103fc..d2c8e5313539 100644 --- a/arch/arm/kernel/fiq.c +++ b/arch/arm/kernel/fiq.c @@ -45,6 +45,7 @@ #include <asm/cacheflush.h> #include <asm/cp15.h> #include <asm/fiq.h> +#include <asm/mach/irq.h> #include <asm/irq.h> #include <asm/traps.h> @@ -98,8 +99,8 @@ void set_fiq_handler(void *start, unsigned int length) memcpy(base + offset, start, length); if (!cache_is_vipt_nonaliasing()) - flush_icache_range((unsigned long)base + offset, offset + - length); + flush_icache_range((unsigned long)base + offset, + (unsigned long)base + offset + length); flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length); } diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index bda949fd84e8..a0b6d1e3812f 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c @@ -22,11 +22,24 @@ #include <asm/ftrace.h> #include <asm/insn.h> #include <asm/set_memory.h> +#include <asm/stacktrace.h> +#include <asm/patch.h> +/* + * The compiler-emitted profiling hook consists of + * + * PUSH {LR} + * BL __gnu_mcount_nc + * + * To turn this combined sequence into a NOP, we need to restore the value of + * SP before the PUSH. Let's use an ADD rather than a POP into LR, as LR is not + * modified anyway, and reloading LR from memory is highly likely to be less + * efficient. + */ #ifdef CONFIG_THUMB2_KERNEL -#define NOP 0xf85deb04 /* pop.w {lr} */ +#define NOP 0xf10d0d04 /* add.w sp, sp, #4 */ #else -#define NOP 0xe8bd4000 /* pop {lr} */ +#define NOP 0xe28dd004 /* add sp, sp, #4 */ #endif #ifdef CONFIG_DYNAMIC_FTRACE @@ -35,9 +48,7 @@ static int __ftrace_modify_code(void *data) { int *command = data; - set_kernel_text_rw(); ftrace_modify_all_code(*command); - set_kernel_text_ro(); return 0; } @@ -52,28 +63,36 @@ static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec) return NOP; } -static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr) +void ftrace_caller_from_init(void); +void ftrace_regs_caller_from_init(void); + +static unsigned long __ref adjust_address(struct dyn_ftrace *rec, + unsigned long addr) { - return addr; + if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE) || + system_state >= SYSTEM_FREEING_INITMEM || + likely(!is_kernel_inittext(rec->ip))) + return addr; + if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) || + addr == (unsigned long)&ftrace_caller) + return (unsigned long)&ftrace_caller_from_init; + return (unsigned long)&ftrace_regs_caller_from_init; } -int ftrace_arch_code_modify_prepare(void) +void ftrace_arch_code_modify_prepare(void) { - set_all_modules_text_rw(); - return 0; } -int ftrace_arch_code_modify_post_process(void) +void ftrace_arch_code_modify_post_process(void) { - set_all_modules_text_ro(); /* Make sure any TLB misses during machine stop are cleared.
*/ flush_tlb_all(); - return 0; } -static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr) +static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr, + bool warn) { - return arm_gen_branch_link(pc, addr); + return arm_gen_branch_link(pc, addr, warn); } static int ftrace_modify_code(unsigned long pc, unsigned long old, @@ -81,26 +100,21 @@ static int ftrace_modify_code(unsigned long pc, unsigned long old, { unsigned long replaced; - if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) { + if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) old = __opcode_to_mem_thumb32(old); - new = __opcode_to_mem_thumb32(new); - } else { + else old = __opcode_to_mem_arm(old); - new = __opcode_to_mem_arm(new); - } if (validate) { - if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE)) + if (copy_from_kernel_nofault(&replaced, (void *)pc, + MCOUNT_INSN_SIZE)) return -EFAULT; if (replaced != old) return -EINVAL; } - if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE)) - return -EPERM; - - flush_icache_range(pc, pc + MCOUNT_INSN_SIZE); + __patch_text((void *)pc, new); return 0; } @@ -112,14 +126,14 @@ int ftrace_update_ftrace_func(ftrace_func_t func) int ret; pc = (unsigned long)&ftrace_call; - new = ftrace_call_replace(pc, (unsigned long)func); + new = ftrace_call_replace(pc, (unsigned long)func, true); ret = ftrace_modify_code(pc, 0, new, false); #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS if (!ret) { pc = (unsigned long)&ftrace_regs_call; - new = ftrace_call_replace(pc, (unsigned long)func); + new = ftrace_call_replace(pc, (unsigned long)func, true); ret = ftrace_modify_code(pc, 0, new, false); } @@ -132,10 +146,22 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { unsigned long new, old; unsigned long ip = rec->ip; + unsigned long aaddr = adjust_address(rec, addr); + struct module *mod = NULL; + +#ifdef CONFIG_ARM_MODULE_PLTS + mod = rec->arch.mod; +#endif old = ftrace_nop_replace(rec); - new = ftrace_call_replace(ip, adjust_address(rec, addr)); + new = ftrace_call_replace(ip, aaddr, !mod); +#ifdef CONFIG_ARM_MODULE_PLTS + if (!new && mod) { + aaddr = get_module_plt(mod, ip, aaddr); + new = ftrace_call_replace(ip, aaddr, true); + } +#endif return ftrace_modify_code(rec->ip, old, new, true); } @@ -148,9 +174,9 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long new, old; unsigned long ip = rec->ip; - old = ftrace_call_replace(ip, adjust_address(rec, old_addr)); + old = ftrace_call_replace(ip, adjust_address(rec, old_addr), true); - new = ftrace_call_replace(ip, adjust_address(rec, addr)); + new = ftrace_call_replace(ip, adjust_address(rec, addr), true); return ftrace_modify_code(rec->ip, old, new, true); } @@ -160,27 +186,47 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { + unsigned long aaddr = adjust_address(rec, addr); unsigned long ip = rec->ip; unsigned long old; unsigned long new; int ret; - old = ftrace_call_replace(ip, adjust_address(rec, addr)); +#ifdef CONFIG_ARM_MODULE_PLTS + /* mod is only supplied during module loading */ + if (!mod) + mod = rec->arch.mod; + else + rec->arch.mod = mod; +#endif + + old = ftrace_call_replace(ip, aaddr, + !IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || !mod); +#ifdef CONFIG_ARM_MODULE_PLTS + if (!old && mod) { + aaddr = get_module_plt(mod, ip, aaddr); + old = ftrace_call_replace(ip, aaddr, true); + } +#endif + new = ftrace_nop_replace(rec); - ret = ftrace_modify_code(ip, old, new, 
true); + /* + * Locations in .init.text may call __gnu_mcount_nc via a linker + * emitted veneer if they are too far away from its implementation, and + * so validation may fail spuriously in such cases. Let's work around + * this by omitting those from validation. + */ + ret = ftrace_modify_code(ip, old, new, !is_kernel_inittext(ip)); return ret; } - -int __init ftrace_dyn_arch_init(void) -{ - return 0; -} #endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER +asmlinkage void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, - unsigned long frame_pointer) + unsigned long frame_pointer, + unsigned long stack_pointer) { unsigned long return_hooker = (unsigned long) &return_to_handler; unsigned long old; @@ -188,6 +234,23 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, if (unlikely(atomic_read(&current->tracing_graph_pause))) return; + if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER)) { + /* FP points one word below parent's top of stack */ + frame_pointer += 4; + } else { + struct stackframe frame = { + .fp = frame_pointer, + .sp = stack_pointer, + .lr = self_addr, + .pc = self_addr, + }; + if (unwind_frame(&frame) < 0) + return; + if (frame.lr != self_addr) + parent = frame.lr_addr; + frame_pointer = frame.sp; + } + old = *parent; *parent = return_hooker; @@ -208,7 +271,7 @@ static int __ftrace_modify_caller(unsigned long *callsite, unsigned long caller_fn = (unsigned long) func; unsigned long pc = (unsigned long) callsite; unsigned long branch = arm_gen_branch(pc, caller_fn); - unsigned long nop = 0xe1a00000; /* mov r0, r0 */ + unsigned long nop = arm_gen_nop(); unsigned long old = enable ? nop : branch; unsigned long new = enable ? branch : nop; diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S index 4a3982812a40..42cae73fcc19 100644 --- a/arch/arm/kernel/head-common.S +++ b/arch/arm/kernel/head-common.S @@ -95,7 +95,7 @@ __mmap_switched: THUMB( ldmia r4!, {r0, r1, r2, r3} ) THUMB( mov sp, r3 ) sub r2, r2, r1 - bl memcpy @ copy .data to RAM + bl __memcpy @ copy .data to RAM #endif ARM( ldmia r4!, {r0, r1, sp} ) @@ -103,7 +103,10 @@ __mmap_switched: THUMB( mov sp, r3 ) sub r2, r1, r0 mov r1, #0 - bl memset @ clear .bss + bl __memset @ clear .bss + + adr_l r0, init_task @ get swapper task_struct + set_current r0, r1 ldmia r4, {r0, r1, r2, r3} str r9, [r0] @ Save processor ID @@ -111,6 +114,9 @@ __mmap_switched: str r8, [r2] @ Save atags pointer cmp r3, #0 strne r10, [r3] @ Save control register values +#ifdef CONFIG_KASAN + bl kasan_early_init +#endif mov lr, #0 b start_kernel ENDPROC(__mmap_switched) @@ -170,11 +176,12 @@ ENDPROC(lookup_processor_type) * r9 = cpuid (preserved) */ __lookup_processor_type: - adr r3, __lookup_processor_type_data - ldmia r3, {r4 - r6} - sub r3, r3, r4 @ get offset between virt&phys - add r5, r5, r3 @ convert virt addresses to - add r6, r6, r3 @ physical address space + /* + * Look in <asm/procinfo.h> for information about the __proc_info + * structure. + */ + adr_l r5, __proc_info_begin + adr_l r6, __proc_info_end 1: ldmia r5, {r3, r4} @ value, mask and r4, r4, r9 @ mask wanted bits teq r3, r4 @@ -186,17 +193,6 @@ __lookup_processor_type: 2: ret lr ENDPROC(__lookup_processor_type) -/* - * Look in <asm/procinfo.h> for information about the __proc_info structure. - */ - .align 2 - .type __lookup_processor_type_data, %object -__lookup_processor_type_data: - .long . - .long __proc_info_begin - .long __proc_info_end - .size __lookup_processor_type_data, .
- __lookup_processor_type_data - __error_lpae: #ifdef CONFIG_DEBUG_LL adr r0, str_lpae diff --git a/arch/arm/kernel/head-inflate-data.c b/arch/arm/kernel/head-inflate-data.c index 89a52104d32a..225c0699a12c 100644 --- a/arch/arm/kernel/head-inflate-data.c +++ b/arch/arm/kernel/head-inflate-data.c @@ -8,16 +8,13 @@ #include <linux/init.h> #include <linux/zutil.h> +#include "head.h" /* for struct inflate_state */ #include "../../../lib/zlib_inflate/inftrees.h" #include "../../../lib/zlib_inflate/inflate.h" #include "../../../lib/zlib_inflate/infutil.h" -extern char __data_loc[]; -extern char _edata_loc[]; -extern char _sdata[]; - /* * This code is called very early during the boot process to decompress * the .data segment stored compressed in ROM. Therefore none of the global diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index 0fc814bbc34b..b9d6818f1ee1 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -14,12 +14,11 @@ #include <asm/assembler.h> #include <asm/ptrace.h> #include <asm/asm-offsets.h> -#include <asm/memory.h> +#include <asm/page.h> #include <asm/cp15.h> #include <asm/thread_info.h> #include <asm/v7m.h> #include <asm/mpu.h> -#include <asm/page.h> /* * Kernel startup entry point. @@ -114,7 +113,9 @@ ENTRY(secondary_startup) add r12, r12, r10 ret r12 1: bl __after_proc_init + ldr r7, __secondary_data @ reload r7 ldr sp, [r7, #12] @ set up the stack pointer + ldr r0, [r7, #16] @ set up task pointer mov fp, #0 b secondary_start_kernel ENDPROC(secondary_startup) diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index c49b39340ddb..1ec35f065617 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -10,20 +10,19 @@ */ #include <linux/linkage.h> #include <linux/init.h> +#include <linux/pgtable.h> #include <asm/assembler.h> #include <asm/cp15.h> #include <asm/domain.h> #include <asm/ptrace.h> #include <asm/asm-offsets.h> -#include <asm/memory.h> +#include <asm/page.h> #include <asm/thread_info.h> -#include <asm/pgtable.h> #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING) #include CONFIG_DEBUG_LL_INCLUDE #endif - /* * swapper_pg_dir is the virtual address of the initial page table. * We place the page tables 16K below KERNEL_RAM_VADDR. Therefore, we must * the least significant 16 bits to be 0x8000, but we could probably * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000. */ -#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET) +#define KERNEL_RAM_VADDR (KERNEL_OFFSET + TEXT_OFFSET) #if (KERNEL_RAM_VADDR & 0xffff) != 0x8000 #error KERNEL_RAM_VADDR must start at 0xXXXX8000 #endif @@ -39,15 +38,32 @@ #ifdef CONFIG_ARM_LPAE /* LPAE requires an additional page for the PGD */ #define PG_DIR_SIZE 0x5000 -#define PMD_ORDER 3 +#define PMD_ENTRY_ORDER 3 /* PMD entry size is 2^PMD_ENTRY_ORDER */ #else #define PG_DIR_SIZE 0x4000 -#define PMD_ORDER 2 +#define PMD_ENTRY_ORDER 2 #endif .globl swapper_pg_dir .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE + /* + * This needs to be assigned at runtime when the linker symbols are + * resolved. These are really unsigned 64-bit quantities, but in this + * assembly code we store them as 32-bit values.
+ */ + .pushsection .data + .align 2 + .globl kernel_sec_start + .globl kernel_sec_end +kernel_sec_start: + .long 0 + .long 0 +kernel_sec_end: + .long 0 + .long 0 + .popsection + .macro pgtbl, rd, phys add \rd, \phys, #TEXT_OFFSET sub \rd, \rd, #PG_DIR_SIZE @@ -103,10 +119,8 @@ ENTRY(stext) #endif #ifndef CONFIG_XIP_KERNEL - adr r3, 2f - ldmia r3, {r4, r8} - sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET) - add r8, r8, r4 @ PHYS_OFFSET + adr_l r8, _text @ __pa(_text) + sub r8, r8, #TEXT_OFFSET @ PHYS_OFFSET #else ldr r8, =PLAT_PHYS_OFFSET @ always constant in this case #endif @@ -158,10 +172,6 @@ ENTRY(stext) 1: b __enable_mmu ENDPROC(stext) .ltorg -#ifndef CONFIG_XIP_KERNEL -2: .long . - .long PAGE_OFFSET -#endif /* * Setup the initial page tables. We only setup the barest @@ -224,31 +234,43 @@ __create_page_tables: * Create identity mapping to cater for __enable_mmu. * This identity mapping will be removed by paging_init(). */ - adr r0, __turn_mmu_on_loc - ldmia r0, {r3, r5, r6} - sub r0, r0, r3 @ virt->phys offset - add r5, r5, r0 @ phys __turn_mmu_on - add r6, r6, r0 @ phys __turn_mmu_on_end + adr_l r5, __turn_mmu_on @ _pa(__turn_mmu_on) + adr_l r6, __turn_mmu_on_end @ _pa(__turn_mmu_on_end) mov r5, r5, lsr #SECTION_SHIFT mov r6, r6, lsr #SECTION_SHIFT 1: orr r3, r7, r5, lsl #SECTION_SHIFT @ flags + kernel base - str r3, [r4, r5, lsl #PMD_ORDER] @ identity mapping + str r3, [r4, r5, lsl #PMD_ENTRY_ORDER] @ identity mapping cmp r5, r6 addlo r5, r5, #1 @ next section blo 1b /* - * Map our RAM from the start to the end of the kernel .bss section. + * The main matter: map in the kernel using section mappings, and + * set two variables to indicate the physical start and end of the + * kernel. */ - add r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER) + add r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ENTRY_ORDER) ldr r6, =(_end - 1) - orr r3, r8, r7 - add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER) -1: str r3, [r0], #1 << PMD_ORDER + adr_l r5, kernel_sec_start @ _pa(kernel_sec_start) +#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32 + str r8, [r5, #4] @ Save physical start of kernel (BE) +#else + str r8, [r5] @ Save physical start of kernel (LE) +#endif + orr r3, r8, r7 @ Add the MMU flags + add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER) +1: str r3, [r0], #1 << PMD_ENTRY_ORDER add r3, r3, #1 << SECTION_SHIFT cmp r0, r6 bls 1b + eor r3, r3, r7 @ Remove the MMU flags + adr_l r5, kernel_sec_end @ _pa(kernel_sec_end) +#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32 + str r3, [r5, #4] @ Save physical end of kernel (BE) +#else + str r3, [r5] @ Save physical end of kernel (LE) +#endif #ifdef CONFIG_XIP_KERNEL /* @@ -258,14 +280,14 @@ __create_page_tables: mov r3, pc mov r3, r3, lsr #SECTION_SHIFT orr r3, r7, r3, lsl #SECTION_SHIFT - add r0, r4, #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER) - str r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]! + add r0, r4, #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ENTRY_ORDER) + str r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ENTRY_ORDER]! 
ldr r6, =(_edata_loc - 1) - add r0, r0, #1 << PMD_ORDER - add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER) + add r0, r0, #1 << PMD_ENTRY_ORDER + add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER) 1: cmp r0, r6 add r3, r3, #1 << SECTION_SHIFT - strls r3, [r0], #1 << PMD_ORDER + strls r3, [r0], #1 << PMD_ENTRY_ORDER bls 1b #endif @@ -274,12 +296,11 @@ __create_page_tables: * We map 2 sections in case the ATAGs/DTB crosses a section boundary. */ mov r0, r2, lsr #SECTION_SHIFT - movs r0, r0, lsl #SECTION_SHIFT - subne r3, r0, r8 - addne r3, r3, #PAGE_OFFSET - addne r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER) - orrne r6, r7, r0 - strne r6, [r3], #1 << PMD_ORDER + cmp r2, #0 + ldrne r3, =FDT_FIXED_BASE >> (SECTION_SHIFT - PMD_ENTRY_ORDER) + addne r3, r3, r4 + orrne r6, r7, r0, lsl #SECTION_SHIFT + strne r6, [r3], #1 << PMD_ENTRY_ORDER addne r6, r6, #1 << SECTION_SHIFT strne r6, [r3] @@ -298,7 +319,7 @@ __create_page_tables: addruart r7, r3, r0 mov r3, r3, lsr #SECTION_SHIFT - mov r3, r3, lsl #PMD_ORDER + mov r3, r3, lsl #PMD_ENTRY_ORDER add r0, r4, r3 mov r3, r7, lsr #SECTION_SHIFT @@ -323,12 +344,12 @@ __create_page_tables: ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags #endif -#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS) +#if defined(CONFIG_ARCH_NETWINDER) /* * If we're using the NetWinder or CATS, we also need to map * in the 16550-type serial port for the debug messages */ - add r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ORDER) + add r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER) orr r3, r7, #0x7c000000 str r3, [r0] #endif @@ -338,10 +359,10 @@ __create_page_tables: * Similar reasons here - for debug. This is * only for Acorn RiscPC architectures. */ - add r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ORDER) + add r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER) orr r3, r7, #0x02000000 str r3, [r0] - add r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER) + add r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ENTRY_ORDER) str r3, [r0] #endif #endif @@ -351,11 +372,6 @@ __create_page_tables: ret lr ENDPROC(__create_page_tables) .ltorg - .align -__turn_mmu_on_loc: - .long . - .long __turn_mmu_on - .long __turn_mmu_on_end #if defined(CONFIG_SMP) .text @@ -391,10 +407,8 @@ ENTRY(secondary_startup) /* * Use the page tables supplied from __cpu_up. */ - adr r4, __secondary_data - ldmia r4, {r5, r7, r12} @ address to jump to after - sub lr, r4, r5 @ mmu has been enabled - add r3, r7, lr + adr_l r3, secondary_data + mov_l r12, __secondary_switched ldrd r4, r5, [r3, #0] @ get secondary_data.pgdir ARM_BE8(eor r4, r4, r5) @ Swap r5 and r4 in BE: ARM_BE8(eor r5, r4, r5) @ it can be done in 3 steps @@ -409,22 +423,21 @@ ARM_BE8(eor r4, r4, r5) @ without using a temp reg. ENDPROC(secondary_startup) ENDPROC(secondary_startup_arm) - /* - * r6 = &secondary_data - */ ENTRY(__secondary_switched) - ldr sp, [r7, #12] @ get secondary_data.stack +#if defined(CONFIG_VMAP_STACK) && !defined(CONFIG_ARM_LPAE) + @ Before using the vmap'ed stack, we have to switch to swapper_pg_dir + @ as the ID map does not cover the vmalloc region. + mrc p15, 0, ip, c2, c0, 1 @ read TTBR1 + mcr p15, 0, ip, c2, c0, 0 @ set TTBR0 + instr_sync +#endif + adr_l r7, secondary_data + 12 @ get secondary_data.stack + ldr sp, [r7] + ldr r0, [r7, #4] @ get secondary_data.task mov fp, #0 b secondary_start_kernel ENDPROC(__secondary_switched) - .align - - .type __secondary_data, %object -__secondary_data: - .long . 
- .long secondary_data - .long __secondary_switched #endif /* defined(CONFIG_SMP) */ @@ -539,19 +552,11 @@ ARM_BE8(rev r0, r0) @ byteswap if big endian retne lr __fixup_smp_on_up: - adr r0, 1f - ldmia r0, {r3 - r5} - sub r3, r0, r3 - add r4, r4, r3 - add r5, r5, r3 + adr_l r4, __smpalt_begin + adr_l r5, __smpalt_end b __do_fixup_smp_on_up ENDPROC(__fixup_smp) - .align -1: .word . - .word __smpalt_begin - .word __smpalt_end - .pushsection .data .align 2 .globl smp_on_up @@ -565,14 +570,15 @@ smp_on_up: __do_fixup_smp_on_up: cmp r4, r5 reths lr - ldmia r4!, {r0, r6} - ARM( str r6, [r0, r3] ) - THUMB( add r0, r0, r3 ) + ldmia r4, {r0, r6} + ARM( str r6, [r0, r4] ) + THUMB( add r0, r0, r4 ) + add r4, r4, #8 #ifdef __ARMEB__ THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian. #endif THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords - THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3. + THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r0. THUMB( strh r6, [r0] ) b __do_fixup_smp_on_up ENDPROC(__do_fixup_smp_on_up) @@ -581,151 +587,8 @@ ENTRY(fixup_smp) stmfd sp!, {r4 - r6, lr} mov r4, r0 add r5, r0, r1 - mov r3, #0 bl __do_fixup_smp_on_up ldmfd sp!, {r4 - r6, pc} ENDPROC(fixup_smp) -#ifdef __ARMEB__ -#define LOW_OFFSET 0x4 -#define HIGH_OFFSET 0x0 -#else -#define LOW_OFFSET 0x0 -#define HIGH_OFFSET 0x4 -#endif - -#ifdef CONFIG_ARM_PATCH_PHYS_VIRT - -/* __fixup_pv_table - patch the stub instructions with the delta between - * PHYS_OFFSET and PAGE_OFFSET, which is assumed to be 16MiB aligned and - * can be expressed by an immediate shifter operand. The stub instruction - * has a form of '(add|sub) rd, rn, #imm'. - */ - __HEAD -__fixup_pv_table: - adr r0, 1f - ldmia r0, {r3-r7} - mvn ip, #0 - subs r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET - add r4, r4, r3 @ adjust table start address - add r5, r5, r3 @ adjust table end address - add r6, r6, r3 @ adjust __pv_phys_pfn_offset address - add r7, r7, r3 @ adjust __pv_offset address - mov r0, r8, lsr #PAGE_SHIFT @ convert to PFN - str r0, [r6] @ save computed PHYS_OFFSET to __pv_phys_pfn_offset - strcc ip, [r7, #HIGH_OFFSET] @ save to __pv_offset high bits - mov r6, r3, lsr #24 @ constant for add/sub instructions - teq r3, r6, lsl #24 @ must be 16MiB aligned -THUMB( it ne @ cross section branch ) - bne __error - str r3, [r7, #LOW_OFFSET] @ save to __pv_offset low bits - b __fixup_a_pv_table -ENDPROC(__fixup_pv_table) - - .align -1: .long . 
- .long __pv_table_begin - .long __pv_table_end -2: .long __pv_phys_pfn_offset - .long __pv_offset - - .text -__fixup_a_pv_table: - adr r0, 3f - ldr r6, [r0] - add r6, r6, r3 - ldr r0, [r6, #HIGH_OFFSET] @ pv_offset high word - ldr r6, [r6, #LOW_OFFSET] @ pv_offset low word - mov r6, r6, lsr #24 - cmn r0, #1 -#ifdef CONFIG_THUMB2_KERNEL - moveq r0, #0x200000 @ set bit 21, mov to mvn instruction - lsls r6, #24 - beq 2f - clz r7, r6 - lsr r6, #24 - lsl r6, r7 - bic r6, #0x0080 - lsrs r7, #1 - orrcs r6, #0x0080 - orr r6, r6, r7, lsl #12 - orr r6, #0x4000 - b 2f -1: add r7, r3 - ldrh ip, [r7, #2] -ARM_BE8(rev16 ip, ip) - tst ip, #0x4000 - and ip, #0x8f00 - orrne ip, r6 @ mask in offset bits 31-24 - orreq ip, r0 @ mask in offset bits 7-0 -ARM_BE8(rev16 ip, ip) - strh ip, [r7, #2] - bne 2f - ldrh ip, [r7] -ARM_BE8(rev16 ip, ip) - bic ip, #0x20 - orr ip, ip, r0, lsr #16 -ARM_BE8(rev16 ip, ip) - strh ip, [r7] -2: cmp r4, r5 - ldrcc r7, [r4], #4 @ use branch for delay slot - bcc 1b - bx lr -#else -#ifdef CONFIG_CPU_ENDIAN_BE8 - moveq r0, #0x00004000 @ set bit 22, mov to mvn instruction -#else - moveq r0, #0x400000 @ set bit 22, mov to mvn instruction -#endif - b 2f -1: ldr ip, [r7, r3] -#ifdef CONFIG_CPU_ENDIAN_BE8 - @ in BE8, we load data in BE, but instructions still in LE - bic ip, ip, #0xff000000 - tst ip, #0x000f0000 @ check the rotation field - orrne ip, ip, r6, lsl #24 @ mask in offset bits 31-24 - biceq ip, ip, #0x00004000 @ clear bit 22 - orreq ip, ip, r0 @ mask in offset bits 7-0 -#else - bic ip, ip, #0x000000ff - tst ip, #0xf00 @ check the rotation field - orrne ip, ip, r6 @ mask in offset bits 31-24 - biceq ip, ip, #0x400000 @ clear bit 22 - orreq ip, ip, r0 @ mask in offset bits 7-0 -#endif - str ip, [r7, r3] -2: cmp r4, r5 - ldrcc r7, [r4], #4 @ use branch for delay slot - bcc 1b - ret lr -#endif -ENDPROC(__fixup_a_pv_table) - - .align -3: .long __pv_offset - -ENTRY(fixup_pv_table) - stmfd sp!, {r4 - r7, lr} - mov r3, #0 @ no offset - mov r4, r0 @ r0 = table start - add r5, r0, r1 @ r1 = table size - bl __fixup_a_pv_table - ldmfd sp!, {r4 - r7, pc} -ENDPROC(fixup_pv_table) - - .data - .align 2 - .globl __pv_phys_pfn_offset - .type __pv_phys_pfn_offset, %object -__pv_phys_pfn_offset: - .word 0 - .size __pv_phys_pfn_offset, . -__pv_phys_pfn_offset - - .globl __pv_offset - .type __pv_offset, %object -__pv_offset: - .quad 0 - .size __pv_offset, . 
-__pv_offset #endif - #include "head-common.S" diff --git a/arch/arm/kernel/head.h b/arch/arm/kernel/head.h new file mode 100644 index 000000000000..0eb5accf7141 --- /dev/null +++ b/arch/arm/kernel/head.h @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0-only + +extern char __data_loc[]; +extern char _edata_loc[]; +extern char _sdata[]; + +int __init __inflate_kernel_data(void); diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c index 2373020af965..38a90a3d12b2 100644 --- a/arch/arm/kernel/hibernate.c +++ b/arch/arm/kernel/hibernate.c @@ -19,7 +19,7 @@ #include <asm/system_misc.h> #include <asm/idmap.h> #include <asm/suspend.h> -#include <asm/memory.h> +#include <asm/page.h> #include <asm/sections.h> #include "reboot.h" diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 02ca7adf5375..dc0fb7a81371 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -547,7 +547,7 @@ static int arch_build_bp_info(struct perf_event *bp, if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE) && max_watchpoint_len >= 8) break; - /* Else, fall through */ + fallthrough; default: return -EINVAL; } @@ -612,12 +612,12 @@ int hw_breakpoint_arch_parse(struct perf_event *bp, /* Allow halfword watchpoints and breakpoints. */ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2) break; - /* Else, fall through */ + fallthrough; case 3: /* Allow single byte watchpoint. */ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1) break; - /* Else, fall through */ + fallthrough; default: ret = -EINVAL; goto out; @@ -626,7 +626,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp, hw->address &= ~alignment_mask; hw->ctrl.len <<= offset; - if (is_default_overflow_handler(bp)) { + if (uses_default_overflow_handler(bp)) { /* * Mismatch breakpoints are required for single-stepping * breakpoints. @@ -683,26 +683,68 @@ static void disable_single_step(struct perf_event *bp) arch_install_hw_breakpoint(bp); } +/* + * Arm32 hardware does not always report a watchpoint hit address that matches + * one of the watchpoints set. It can also report an address "near" the + * watchpoint if a single instruction accesses both watched and unwatched + * addresses. There is no straightforward way, short of disassembling the + * offending instruction, to map that address back to the watchpoint. This + * function computes the distance of the memory access from the watchpoint as a + * heuristic for the likelihood that a given access triggered the watchpoint. + * + * See this same function in the arm64 platform code, which has the same + * problem. + * + * The function returns the distance of the address from the bytes watched by + * the watchpoint. In case of an exact match, it returns 0. + */ +static u32 get_distance_from_watchpoint(unsigned long addr, u32 val, + struct arch_hw_breakpoint_ctrl *ctrl) +{ + u32 wp_low, wp_high; + u32 lens, lene; + + lens = __ffs(ctrl->len); + lene = __fls(ctrl->len); + + wp_low = val + lens; + wp_high = val + lene; + if (addr < wp_low) + return wp_low - addr; + else if (addr > wp_high) + return addr - wp_high; + else + return 0; +}
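A worked example may make get_distance_from_watchpoint() above easier to follow. The sketch below mimics the kernel's byte-address-select convention (ctrl->len is a bitmask of watched bytes starting at the watchpoint value), substituting compiler builtins for __ffs/__fls; it is illustrative, not kernel code:

	#include <assert.h>

	static unsigned int distance(unsigned long addr, unsigned long val,
				     unsigned int ctrl_len)
	{
		unsigned long wp_low  = val + __builtin_ctz(ctrl_len);      /* __ffs */
		unsigned long wp_high = val + 31 - __builtin_clz(ctrl_len); /* __fls */

		if (addr < wp_low)
			return wp_low - addr;
		if (addr > wp_high)
			return addr - wp_high;
		return 0;
	}

	int main(void)
	{
		/* 4-byte watchpoint covering 0x1000..0x1003: ctrl->len == 0xf */
		assert(distance(0x0ffe, 0x1000, 0xf) == 2); /* reported below */
		assert(distance(0x1002, 0x1000, 0xf) == 0); /* exact match    */
		assert(distance(0x1006, 0x1000, 0xf) == 3); /* reported above */
		return 0;
	}

The handler below uses this distance to attribute an inexact hit to the nearest configured watchpoint.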
+ +static int watchpoint_fault_on_uaccess(struct pt_regs *regs, + struct arch_hw_breakpoint *info) +{ + return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER; +} + static void watchpoint_handler(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { - int i, access; - u32 val, ctrl_reg, alignment_mask; + int i, access, closest_match = 0; + u32 min_dist = -1, dist; + u32 val, ctrl_reg; struct perf_event *wp, **slots; struct arch_hw_breakpoint *info; struct arch_hw_breakpoint_ctrl ctrl; slots = this_cpu_ptr(wp_on_reg); + /* + * Find all watchpoints that match the reported address. If no exact + * match is found, attribute the hit to the closest watchpoint. + */ + rcu_read_lock(); for (i = 0; i < core_num_wrps; ++i) { - rcu_read_lock(); - wp = slots[i]; if (wp == NULL) - goto unlock; + continue; - info = counter_arch_bp(wp); /* * The DFAR is an unknown value on debug architectures prior * to 7.1. Since we only allow a single watchpoint on these @@ -711,50 +753,69 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr, */ if (debug_arch < ARM_DEBUG_ARCH_V7_1) { BUG_ON(i > 0); + info = counter_arch_bp(wp); info->trigger = wp->attr.bp_addr; } else { - if (info->ctrl.len == ARM_BREAKPOINT_LEN_8) - alignment_mask = 0x7; - else - alignment_mask = 0x3; - - /* Check if the watchpoint value matches. */ - val = read_wb_reg(ARM_BASE_WVR + i); - if (val != (addr & ~alignment_mask)) - goto unlock; - - /* Possible match, check the byte address select. */ - ctrl_reg = read_wb_reg(ARM_BASE_WCR + i); - decode_ctrl_reg(ctrl_reg, &ctrl); - if (!((1 << (addr & alignment_mask)) & ctrl.len)) - goto unlock; - /* Check that the access type matches. */ if (debug_exception_updates_fsr()) { access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W : HW_BREAKPOINT_R; if (!(access & hw_breakpoint_type(wp))) - goto unlock; + continue; + } + + val = read_wb_reg(ARM_BASE_WVR + i); + ctrl_reg = read_wb_reg(ARM_BASE_WCR + i); + decode_ctrl_reg(ctrl_reg, &ctrl); + dist = get_distance_from_watchpoint(addr, val, &ctrl); + if (dist < min_dist) { + min_dist = dist; + closest_match = i; } + /* Is this an exact match? */ + if (dist != 0) + continue; /* We have a winner. */ + info = counter_arch_bp(wp); info->trigger = addr; } pr_debug("watchpoint fired: address = 0x%x\n", info->trigger); + + /* + * If we triggered a user watchpoint from a uaccess routine, + * then handle the stepping ourselves since userspace really + * can't help us with this. + */ + if (watchpoint_fault_on_uaccess(regs, info)) + goto step; + perf_bp_event(wp, regs); /* - * If no overflow handler is present, insert a temporary - * mismatch breakpoint so we can single-step over the - * watchpoint trigger. + * Defer stepping to the overflow handler if one is installed. + * Otherwise, insert a temporary mismatch breakpoint so that + * we can single-step over the watchpoint trigger.
*/ - if (is_default_overflow_handler(wp)) - enable_single_step(wp, instruction_pointer(regs)); + if (!uses_default_overflow_handler(wp)) + continue; +step: + enable_single_step(wp, instruction_pointer(regs)); + } -unlock: - rcu_read_unlock(); + if (min_dist > 0 && min_dist != -1) { + /* No exact match found. */ + wp = slots[closest_match]; + info = counter_arch_bp(wp); + info->trigger = addr; + pr_debug("watchpoint fired: address = 0x%x\n", info->trigger); + perf_bp_event(wp, regs); + if (uses_default_overflow_handler(wp)) + enable_single_step(wp, instruction_pointer(regs)); } + + rcu_read_unlock(); } static void watchpoint_single_step_handler(unsigned long pc) @@ -825,7 +886,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs) info->trigger = addr; pr_debug("breakpoint fired: address = 0x%x\n", addr); perf_bp_event(bp, regs); - if (!bp->overflow_handler) + if (uses_default_overflow_handler(bp)) enable_single_step(bp, addr); goto unlock; } @@ -867,7 +928,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, break; case ARM_ENTRY_ASYNC_WATCHPOINT: WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n"); - /* Fall through */ + fallthrough; case ARM_ENTRY_SYNC_WATCHPOINT: watchpoint_handler(addr, fsr, regs); break; @@ -880,6 +941,23 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, return ret; } +#ifdef CONFIG_ARM_ERRATA_764319 +static int oslsr_fault; + +static int debug_oslsr_trap(struct pt_regs *regs, unsigned int instr) +{ + oslsr_fault = 1; + instruction_pointer(regs) += 4; + return 0; +} + +static struct undef_hook debug_oslsr_hook = { + .instr_mask = 0xffffffff, + .instr_val = 0xee115e91, + .fn = debug_oslsr_trap, +}; +#endif + /* * One-time initialisation. */ @@ -913,10 +991,19 @@ static bool core_has_os_save_restore(void) case ARM_DEBUG_ARCH_V7_1: return true; case ARM_DEBUG_ARCH_V7_ECP14: +#ifdef CONFIG_ARM_ERRATA_764319 + oslsr_fault = 0; + register_undef_hook(&debug_oslsr_hook); ARM_DBG_READ(c1, c1, 4, oslsr); + unregister_undef_hook(&debug_oslsr_hook); + if (oslsr_fault) + return false; +#else + ARM_DBG_READ(c1, c1, 4, oslsr); +#endif if (oslsr & ARM_OSLSR_OSLM0) return true; - /* Else, fall through */ + fallthrough; default: return false; } diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S index ae5020302de4..3a506b9095a5 100644 --- a/arch/arm/kernel/hyp-stub.S +++ b/arch/arm/kernel/hyp-stub.S @@ -9,6 +9,8 @@ #include <asm/assembler.h> #include <asm/virt.h> +.arch armv7-a + #ifndef ZIMAGE /* * For the kernel proper, we need to find out the CPU boot mode long after @@ -24,41 +26,38 @@ ENTRY(__boot_cpu_mode) .text /* - * Save the primary CPU boot mode. Requires 3 scratch registers. + * Save the primary CPU boot mode. Requires 2 scratch registers. */ - .macro store_primary_cpu_mode reg1, reg2, reg3 + .macro store_primary_cpu_mode reg1, reg2 mrs \reg1, cpsr and \reg1, \reg1, #MODE_MASK - adr \reg2, .L__boot_cpu_mode_offset - ldr \reg3, [\reg2] - str \reg1, [\reg2, \reg3] + str_l \reg1, __boot_cpu_mode, \reg2 .endm /* * Compare the current mode with the one saved on the primary CPU. * If they don't match, record that fact. The Z bit indicates * if there's a match or not. - * Requires 3 additionnal scratch registers. + * Requires 2 additional scratch registers. 
*/ - .macro compare_cpu_mode_with_primary mode, reg1, reg2, reg3 - adr \reg2, .L__boot_cpu_mode_offset - ldr \reg3, [\reg2] - ldr \reg1, [\reg2, \reg3] + .macro compare_cpu_mode_with_primary mode, reg1, reg2 + adr_l \reg2, __boot_cpu_mode + ldr \reg1, [\reg2] cmp \mode, \reg1 @ matches primary CPU boot mode? orrne \reg1, \reg1, #BOOT_CPU_MODE_MISMATCH - strne \reg1, [\reg2, \reg3] @ record what happened and give up + strne \reg1, [\reg2] @ record what happened and give up .endm #else /* ZIMAGE */ - .macro store_primary_cpu_mode reg1:req, reg2:req, reg3:req + .macro store_primary_cpu_mode reg1:req, reg2:req .endm /* * The zImage loader only runs on one CPU, so we don't bother with multi-CPU * consistency checking: */ - .macro compare_cpu_mode_with_primary mode, reg1, reg2, reg3 + .macro compare_cpu_mode_with_primary mode, reg1, reg2 cmp \mode, \mode .endm @@ -73,7 +72,7 @@ */ @ Call this from the primary CPU ENTRY(__hyp_stub_install) - store_primary_cpu_mode r4, r5, r6 + store_primary_cpu_mode r4, r5 ENDPROC(__hyp_stub_install) @ fall through... @@ -87,7 +86,7 @@ ENTRY(__hyp_stub_install_secondary) * If the secondary has booted with a different mode, give up * immediately. */ - compare_cpu_mode_with_primary r4, r5, r6, r7 + compare_cpu_mode_with_primary r4, r5, r6 retne lr /* @@ -146,10 +145,9 @@ ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE #if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER) @ make CNTP_* and CNTPCT accessible from PL1 mrc p15, 0, r7, c0, c1, 1 @ ID_PFR1 - lsr r7, #16 - and r7, #0xf - cmp r7, #1 - bne 1f + ubfx r7, r7, #16, #4 + teq r7, #0 + beq 1f mrc p15, 4, r7, c14, c1, 0 @ CNTHCTL orr r7, r7, #3 @ PL1PCEN | PL1PCTEN mcr p15, 4, r7, c14, c1, 0 @ CNTHCTL @@ -190,19 +188,19 @@ ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE ENDPROC(__hyp_stub_install_secondary) __hyp_stub_do_trap: +#ifdef ZIMAGE teq r0, #HVC_SET_VECTORS bne 1f + /* Only the ZIMAGE stubs can change the HYP vectors */ mcr p15, 4, r1, c12, c0, 0 @ set HVBAR b __hyp_stub_exit +#endif 1: teq r0, #HVC_SOFT_RESTART - bne 1f + bne 2f bx r1 -1: teq r0, #HVC_RESET_VECTORS - beq __hyp_stub_exit - - ldr r0, =HVC_STUB_ERR +2: ldr r0, =HVC_STUB_ERR __ERET __hyp_stub_exit: @@ -211,26 +209,9 @@ ENDPROC(__hyp_stub_do_trap) /* - * __hyp_set_vectors: Call this after boot to set the initial hypervisor - * vectors as part of hypervisor installation. On an SMP system, this should - * be called on each CPU. - * - * r0 must be the physical address of the new vector table (which must lie in - * the bottom 4GB of physical address space. - * - * r0 must be 32-byte aligned. - * - * Before calling this, you must check that the stub hypervisor is installed - * everywhere, by waiting for any secondary CPUs to be brought up and then - * checking that BOOT_CPU_MODE_HAVE_HYP(__boot_cpu_mode) is true. - * - * If not, there is a pre-existing hypervisor, some CPUs failed to boot, or - * something else went wrong... in such cases, trying to install a new - * hypervisor is unlikely to work as desired. - * - * When you call into your shiny new hypervisor, sp_hyp will contain junk, - * so you will need to set that to something sensible at the new hypervisor's - * initialisation entry point. + * __hyp_set_vectors is only used when ZIMAGE must bounce between HYP + * and SVC. For the kernel itself, the vectors are set once and for + * all by the stubs.
*/ ENTRY(__hyp_set_vectors) mov r1, r0 @@ -246,18 +227,6 @@ ENTRY(__hyp_soft_restart) ret lr ENDPROC(__hyp_soft_restart) -ENTRY(__hyp_reset_vectors) - mov r0, #HVC_RESET_VECTORS - __HVC(0) - ret lr -ENDPROC(__hyp_reset_vectors) - -#ifndef ZIMAGE -.align 2 -.L__boot_cpu_mode_offset: - .long __boot_cpu_mode - . -#endif - .align 5 ENTRY(__hyp_stub_vectors) __hyp_stub_reset: W(b) . diff --git a/arch/arm/kernel/insn.c b/arch/arm/kernel/insn.c index 2e844b70386b..db0acbb7d7a0 100644 --- a/arch/arm/kernel/insn.c +++ b/arch/arm/kernel/insn.c @@ -3,8 +3,9 @@ #include <linux/kernel.h> #include <asm/opcodes.h> -static unsigned long -__arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link) +static unsigned long __arm_gen_branch_thumb2(unsigned long pc, + unsigned long addr, bool link, + bool warn) { unsigned long s, j1, j2, i1, i2, imm10, imm11; unsigned long first, second; @@ -12,7 +13,7 @@ __arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link) offset = (long)addr - (long)(pc + 4); if (offset < -16777216 || offset > 16777214) { - WARN_ON_ONCE(1); + WARN_ON_ONCE(warn); return 0; } @@ -33,8 +34,8 @@ __arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link) return __opcode_thumb32_compose(first, second); } -static unsigned long -__arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link) +static unsigned long __arm_gen_branch_arm(unsigned long pc, unsigned long addr, + bool link, bool warn) { unsigned long opcode = 0xea000000; long offset; @@ -44,7 +45,7 @@ __arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link) offset = (long)addr - (long)(pc + 8); if (unlikely(offset < -33554432 || offset > 33554428)) { - WARN_ON_ONCE(1); + WARN_ON_ONCE(warn); return 0; } @@ -54,10 +55,10 @@ __arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link) } unsigned long __arm_gen_branch(unsigned long pc, unsigned long addr, bool link, bool warn) { if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) - return __arm_gen_branch_thumb2(pc, addr, link); + return __arm_gen_branch_thumb2(pc, addr, link, warn); else - return __arm_gen_branch_arm(pc, addr, link); + return __arm_gen_branch_arm(pc, addr, link, warn); }
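For reference, the encoding that __arm_gen_branch_arm() in the hunk above produces can be worked through by hand. A self-contained sketch of the ARM-mode case (standard B/BL encoding rules; the Thumb-2 variant packs its offset differently, and the right-shift of a negative offset is shown here only as a sketch):

	#include <assert.h>

	static unsigned long gen_branch(unsigned long pc, unsigned long addr,
					int link)
	{
		unsigned long opcode = 0xea000000;         /* B; BL sets bit 24 */
		long offset = (long)addr - (long)(pc + 8); /* ARM PC reads 8 ahead */

		if (offset < -33554432 || offset > 33554428)
			return 0;                          /* outside +/-32 MiB */
		if (link)
			opcode |= 1 << 24;

		return opcode | ((offset >> 2) & 0x00ffffff);
	}

	int main(void)
	{
		/* branching from 0x8000 to 0x9000: offset 0xff8, imm24 0x3fe */
		assert(gen_branch(0x8000, 0x9000, 0) == 0xea0003feUL);
		return 0;
	}

Returning 0 for an out-of-range target is what allows the new 'warn' parameter to suppress the WARN_ON_ONCE in the cases where the ftrace code expects a module callsite to need a PLT veneer instead of a direct branch.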
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index ee514034c0a1..fe28fc1f759d 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -18,7 +18,6 @@ * IRQ's are in fact implemented a bit like signal handlers for the kernel. * Naturally it's not a 1:1 relation, but there are similarities. */ -#include <linux/kernel_stat.h> #include <linux/signal.h> #include <linux/ioport.h> #include <linux/interrupt.h> @@ -37,13 +36,54 @@ #include <asm/hardware/cache-l2x0.h> #include <asm/hardware/cache-uniphier.h> #include <asm/outercache.h> +#include <asm/softirq_stack.h> #include <asm/exception.h> #include <asm/mach/arch.h> #include <asm/mach/irq.h> #include <asm/mach/time.h> +#include "reboot.h" + unsigned long irq_err_count; +#ifdef CONFIG_IRQSTACKS + +asmlinkage DEFINE_PER_CPU_READ_MOSTLY(u8 *, irq_stack_ptr); + +static void __init init_irq_stacks(void) +{ + u8 *stack; + int cpu; + + for_each_possible_cpu(cpu) { + if (!IS_ENABLED(CONFIG_VMAP_STACK)) + stack = (u8 *)__get_free_pages(GFP_KERNEL, + THREAD_SIZE_ORDER); + else + stack = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, + THREADINFO_GFP, NUMA_NO_NODE, + __builtin_return_address(0)); + + if (WARN_ON(!stack)) + break; + per_cpu(irq_stack_ptr, cpu) = &stack[THREAD_SIZE]; + } +} + +#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK +static void ____do_softirq(void *arg) +{ + __do_softirq(); +} + +void do_softirq_own_stack(void) +{ + call_with_stack(____do_softirq, NULL, + __this_cpu_read(irq_stack_ptr)); +} +#endif +#endif + int arch_show_interrupts(struct seq_file *p, int prec) { #ifdef CONFIG_FIQ @@ -64,22 +104,31 @@ int arch_show_interrupts(struct seq_file *p, int prec) */ void handle_IRQ(unsigned int irq, struct pt_regs *regs) { - __handle_domain_irq(NULL, irq, false, regs); -} + struct irq_desc *desc; -/* - * asm_do_IRQ is the interface to be used from assembly code. - */ -asmlinkage void __exception_irq_entry -asm_do_IRQ(unsigned int irq, struct pt_regs *regs) -{ - handle_IRQ(irq, regs); + /* + * Some hardware gives randomly wrong interrupts. Rather + * than crashing, do something sensible. + */ + if (unlikely(!irq || irq >= nr_irqs)) + desc = NULL; + else + desc = irq_to_desc(irq); + + if (likely(desc)) + handle_irq_desc(desc); + else + ack_bad_irq(irq); } void __init init_IRQ(void) { int ret; +#ifdef CONFIG_IRQSTACKS + init_irq_stacks(); +#endif + if (IS_ENABLED(CONFIG_OF) && !machine_desc->init_irq) irqchip_init(); else diff --git a/arch/arm/kernel/isa.c b/arch/arm/kernel/isa.c index d8a509c5d5bd..905b1b191546 100644 --- a/arch/arm/kernel/isa.c +++ b/arch/arm/kernel/isa.c @@ -16,7 +16,7 @@ static unsigned int isa_membase, isa_portbase, isa_portshift; -static struct ctl_table ctl_isa_vars[4] = { +static struct ctl_table ctl_isa_vars[] = { { .procname = "membase", .data = &isa_membase, @@ -35,32 +35,16 @@ static struct ctl_table ctl_isa_vars[4] = { .maxlen = sizeof(isa_portshift), .mode = 0444, .proc_handler = proc_dointvec, - }, {} + }, }; static struct ctl_table_header *isa_sysctl_header; -static struct ctl_table ctl_isa[2] = { - { - .procname = "isa", - .mode = 0555, - .child = ctl_isa_vars, - }, {} -}; - -static struct ctl_table ctl_bus[2] = { - { - .procname = "bus", - .mode = 0555, - .child = ctl_isa, - }, {} -}; - void __init register_isa_ports(unsigned int membase, unsigned int portbase, unsigned int portshift) { isa_membase = membase; isa_portbase = portbase; isa_portshift = portshift; - isa_sysctl_header = register_sysctl_table(ctl_bus); + isa_sysctl_header = register_sysctl("bus/isa", ctl_isa_vars); } diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S index 0dcae787b004..4a335d3c5969 100644 --- a/arch/arm/kernel/iwmmxt.S +++ b/arch/arm/kernel/iwmmxt.S @@ -16,18 +16,7 @@ #include <asm/thread_info.h> #include <asm/asm-offsets.h> #include <asm/assembler.h> - -#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B) -#define
PJ4(code...) code -#define XSC(code...) -#elif defined(CONFIG_CPU_MOHAWK) || \ - defined(CONFIG_CPU_XSC3) || \ - defined(CONFIG_CPU_XSCALE) -#define PJ4(code...) -#define XSC(code...) code -#else -#error "Unsupported iWMMXt architecture" -#endif +#include "iwmmxt.h" #define MMX_WR0 (0x00) #define MMX_WR1 (0x08) @@ -57,9 +46,19 @@ .text .arm +ENTRY(iwmmxt_undef_handler) + push {r9, r10, lr} + get_thread_info r10 + mov r9, pc + b iwmmxt_task_enable + mov r0, #0 + pop {r9, r10, pc} +ENDPROC(iwmmxt_undef_handler) + /* * Lazy switching of Concan coprocessor context * + * r0 = struct pt_regs pointer * r10 = struct thread_info pointer * r9 = ret_from_exception * lr = undefined instr exit @@ -70,25 +69,21 @@ ENTRY(iwmmxt_task_enable) inc_preempt_count r10, r3 - XSC(mrc p15, 0, r2, c15, c1, 0) - PJ4(mrc p15, 0, r2, c1, c0, 2) + mrc p15, 0, r2, c15, c1, 0 @ CP0 and CP1 accessible? - XSC(tst r2, #0x3) - PJ4(tst r2, #0xf) + tst r2, #0x3 bne 4f @ if so no business here @ enable access to CP0 and CP1 - XSC(orr r2, r2, #0x3) - XSC(mcr p15, 0, r2, c15, c1, 0) - PJ4(orr r2, r2, #0xf) - PJ4(mcr p15, 0, r2, c1, c0, 2) + orr r2, r2, #0x3 + mcr p15, 0, r2, c15, c1, 0 ldr r3, =concan_owner - add r0, r10, #TI_IWMMXT_STATE @ get task Concan save area - ldr r2, [sp, #60] @ current task pc value + ldr r2, [r0, #S_PC] @ current task pc value ldr r1, [r3] @ get current Concan owner - str r0, [r3] @ this task now owns Concan regs sub r2, r2, #4 @ adjust pc back - str r2, [sp, #60] + str r2, [r0, #S_PC] + add r0, r10, #TI_IWMMXT_STATE @ get task Concan save area + str r0, [r3] @ this task now owns Concan regs mrc p15, 0, r2, c2, c0, 0 mov r2, r2 @ cpwait @@ -113,33 +108,33 @@ concan_save: concan_dump: - wstrw wCSSF, [r1, #MMX_WCSSF] - wstrw wCASF, [r1, #MMX_WCASF] - wstrw wCGR0, [r1, #MMX_WCGR0] - wstrw wCGR1, [r1, #MMX_WCGR1] - wstrw wCGR2, [r1, #MMX_WCGR2] - wstrw wCGR3, [r1, #MMX_WCGR3] + wstrw wCSSF, r1, MMX_WCSSF + wstrw wCASF, r1, MMX_WCASF + wstrw wCGR0, r1, MMX_WCGR0 + wstrw wCGR1, r1, MMX_WCGR1 + wstrw wCGR2, r1, MMX_WCGR2 + wstrw wCGR3, r1, MMX_WCGR3 1: @ MUP? wRn tst r2, #0x2 beq 2f - wstrd wR0, [r1, #MMX_WR0] - wstrd wR1, [r1, #MMX_WR1] - wstrd wR2, [r1, #MMX_WR2] - wstrd wR3, [r1, #MMX_WR3] - wstrd wR4, [r1, #MMX_WR4] - wstrd wR5, [r1, #MMX_WR5] - wstrd wR6, [r1, #MMX_WR6] - wstrd wR7, [r1, #MMX_WR7] - wstrd wR8, [r1, #MMX_WR8] - wstrd wR9, [r1, #MMX_WR9] - wstrd wR10, [r1, #MMX_WR10] - wstrd wR11, [r1, #MMX_WR11] - wstrd wR12, [r1, #MMX_WR12] - wstrd wR13, [r1, #MMX_WR13] - wstrd wR14, [r1, #MMX_WR14] - wstrd wR15, [r1, #MMX_WR15] + wstrd wR0, r1, MMX_WR0 + wstrd wR1, r1, MMX_WR1 + wstrd wR2, r1, MMX_WR2 + wstrd wR3, r1, MMX_WR3 + wstrd wR4, r1, MMX_WR4 + wstrd wR5, r1, MMX_WR5 + wstrd wR6, r1, MMX_WR6 + wstrd wR7, r1, MMX_WR7 + wstrd wR8, r1, MMX_WR8 + wstrd wR9, r1, MMX_WR9 + wstrd wR10, r1, MMX_WR10 + wstrd wR11, r1, MMX_WR11 + wstrd wR12, r1, MMX_WR12 + wstrd wR13, r1, MMX_WR13 + wstrd wR14, r1, MMX_WR14 + wstrd wR15, r1, MMX_WR15 2: teq r0, #0 @ anything to load? 
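The bracketed-operand forms of wldrd/wstrd replaced above were assembler-specific; the new macros (defined in iwmmxt.h, added later in this patch) emit the raw opcode words with .inst, so the file assembles even where the tool chain lacks iWMMXt support, such as Clang's integrated assembler. A user-space sketch of the encoding those macros perform, using the base opcodes and field positions from the new header (iwmmxt_encode is a hypothetical name):

        #include <stdint.h>
        #include <stdio.h>

        /* Field layout per iwmmxt.h: the wRn/wCx index lands in bits 15:12,
         * the ARM base register in bits 19:16, and the byte offset is
         * stored in words (offset >> 2). Base opcodes from the header:
         * wldrd 0xedd00100, wstrd 0xedc00100, wldrw 0xfd900100,
         * wstrw 0xfd800100. */
        static uint32_t iwmmxt_encode(uint32_t opcode, uint32_t reg,
                                      uint32_t base, uint32_t offset)
        {
                return opcode | (reg << 12) | (base << 16) | (offset >> 2);
        }

        int main(void)
        {
                /* "wstrd wR10, r1, MMX_WR10" with MMX_WR10 == 0x50 */
                printf("%#010x\n", iwmmxt_encode(0xedc00100, 10, 1, 0x50));
                return 0;
        }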
reteq lr @ if not, return @@ -147,30 +142,30 @@ concan_dump: concan_load: @ Load wRn - wldrd wR0, [r0, #MMX_WR0] - wldrd wR1, [r0, #MMX_WR1] - wldrd wR2, [r0, #MMX_WR2] - wldrd wR3, [r0, #MMX_WR3] - wldrd wR4, [r0, #MMX_WR4] - wldrd wR5, [r0, #MMX_WR5] - wldrd wR6, [r0, #MMX_WR6] - wldrd wR7, [r0, #MMX_WR7] - wldrd wR8, [r0, #MMX_WR8] - wldrd wR9, [r0, #MMX_WR9] - wldrd wR10, [r0, #MMX_WR10] - wldrd wR11, [r0, #MMX_WR11] - wldrd wR12, [r0, #MMX_WR12] - wldrd wR13, [r0, #MMX_WR13] - wldrd wR14, [r0, #MMX_WR14] - wldrd wR15, [r0, #MMX_WR15] + wldrd wR0, r0, MMX_WR0 + wldrd wR1, r0, MMX_WR1 + wldrd wR2, r0, MMX_WR2 + wldrd wR3, r0, MMX_WR3 + wldrd wR4, r0, MMX_WR4 + wldrd wR5, r0, MMX_WR5 + wldrd wR6, r0, MMX_WR6 + wldrd wR7, r0, MMX_WR7 + wldrd wR8, r0, MMX_WR8 + wldrd wR9, r0, MMX_WR9 + wldrd wR10, r0, MMX_WR10 + wldrd wR11, r0, MMX_WR11 + wldrd wR12, r0, MMX_WR12 + wldrd wR13, r0, MMX_WR13 + wldrd wR14, r0, MMX_WR14 + wldrd wR15, r0, MMX_WR15 @ Load wCx - wldrw wCSSF, [r0, #MMX_WCSSF] - wldrw wCASF, [r0, #MMX_WCASF] - wldrw wCGR0, [r0, #MMX_WCGR0] - wldrw wCGR1, [r0, #MMX_WCGR1] - wldrw wCGR2, [r0, #MMX_WCGR2] - wldrw wCGR3, [r0, #MMX_WCGR3] + wldrw wCSSF, r0, MMX_WCSSF + wldrw wCASF, r0, MMX_WCASF + wldrw wCGR0, r0, MMX_WCGR0 + wldrw wCGR1, r0, MMX_WCGR1 + wldrw wCGR2, r0, MMX_WCGR2 + wldrw wCGR3, r0, MMX_WCGR3 @ clear CUP/MUP (only if r1 != 0) teq r1, #0 @@ -207,12 +202,9 @@ ENTRY(iwmmxt_task_disable) bne 1f @ no: quit @ enable access to CP0 and CP1 - XSC(mrc p15, 0, r4, c15, c1, 0) - XSC(orr r4, r4, #0x3) - XSC(mcr p15, 0, r4, c15, c1, 0) - PJ4(mrc p15, 0, r4, c1, c0, 2) - PJ4(orr r4, r4, #0xf) - PJ4(mcr p15, 0, r4, c1, c0, 2) + mrc p15, 0, r4, c15, c1, 0 + orr r4, r4, #0x3 + mcr p15, 0, r4, c15, c1, 0 mov r0, #0 @ nothing to load str r0, [r3] @ no more current owner @@ -221,10 +213,8 @@ ENTRY(iwmmxt_task_disable) bl concan_save @ disable access to CP0 and CP1 - XSC(bic r4, r4, #0x3) - XSC(mcr p15, 0, r4, c15, c1, 0) - PJ4(bic r4, r4, #0xf) - PJ4(mcr p15, 0, r4, c1, c0, 2) + bic r4, r4, #0x3 + mcr p15, 0, r4, c15, c1, 0 mrc p15, 0, r2, c2, c0, 0 mov r2, r2 @ cpwait @@ -319,11 +309,9 @@ ENDPROC(iwmmxt_task_restore) */ ENTRY(iwmmxt_task_switch) - XSC(mrc p15, 0, r1, c15, c1, 0) - PJ4(mrc p15, 0, r1, c1, c0, 2) + mrc p15, 0, r1, c15, c1, 0 @ CP0 and CP1 accessible? 
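With pj4-cp0.c deleted later in this patch, only XScale-family cores keep iWMMXt, so the dual XSC()/PJ4() sequences collapse to the XScale coprocessor access register alone (p15, c15, c1, 0; CP0/CP1 enable in bits 0-1). A C rendering of the toggle the assembly performs, offered as a sketch rather than kernel code (concan_set_access is a hypothetical helper):

        /* Hypothetical mirror of the enable/disable dance above: read the
         * CPAR, flip the CP0/CP1 access bits, write it back, then cpwait
         * so the change takes effect before the next Concan access. */
        static inline void concan_set_access(int enable)
        {
                unsigned long v;

                asm volatile("mrc p15, 0, %0, c15, c1, 0" : "=r" (v));
                if (enable)
                        v |= 0x3;
                else
                        v &= ~0x3UL;
                asm volatile("mcr p15, 0, %0, c15, c1, 0" : : "r" (v));
                asm volatile("mrc p15, 0, %0, c2, c0, 0\n\t"
                             "mov %0, %0"               /* cpwait */
                             : "=r" (v));
        }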
- XSC(tst r1, #0x3) - PJ4(tst r1, #0xf) + tst r1, #0x3 bne 1f @ yes: block them for next task ldr r2, =concan_owner @@ -333,10 +321,8 @@ ENTRY(iwmmxt_task_switch) retne lr @ no: leave Concan disabled 1: @ flip Concan access - XSC(eor r1, r1, #0x3) - XSC(mcr p15, 0, r1, c15, c1, 0) - PJ4(eor r1, r1, #0xf) - PJ4(mcr p15, 0, r1, c1, c0, 2) + eor r1, r1, #0x3 + mcr p15, 0, r1, c15, c1, 0 mrc p15, 0, r1, c2, c0, 0 sub pc, lr, r1, lsr #32 @ cpwait and return diff --git a/arch/arm/kernel/iwmmxt.h b/arch/arm/kernel/iwmmxt.h new file mode 100644 index 000000000000..fb627286f5bb --- /dev/null +++ b/arch/arm/kernel/iwmmxt.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __IWMMXT_H__ +#define __IWMMXT_H__ + +.irp b, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 +.set .LwR\b, \b +.set .Lr\b, \b +.endr + +.set .LwCSSF, 0x2 +.set .LwCASF, 0x3 +.set .LwCGR0, 0x8 +.set .LwCGR1, 0x9 +.set .LwCGR2, 0xa +.set .LwCGR3, 0xb + +.macro wldrd, reg:req, base:req, offset:req +.inst 0xedd00100 | (.L\reg << 12) | (.L\base << 16) | (\offset >> 2) +.endm + +.macro wldrw, reg:req, base:req, offset:req +.inst 0xfd900100 | (.L\reg << 12) | (.L\base << 16) | (\offset >> 2) +.endm + +.macro wstrd, reg:req, base:req, offset:req +.inst 0xedc00100 | (.L\reg << 12) | (.L\base << 16) | (\offset >> 2) +.endm + +.macro wstrw, reg:req, base:req, offset:req +.inst 0xfd800100 | (.L\reg << 12) | (.L\base << 16) | (\offset >> 2) +.endm + +#ifdef __clang__ + +#define wCon c1 + +.macro tmrc, dest:req, control:req +mrc p1, 0, \dest, \control, c0, 0 +.endm + +.macro tmcr, control:req, src:req +mcr p1, 0, \src, \control, c0, 0 +.endm +#endif + +#endif diff --git a/arch/arm/kernel/jump_label.c b/arch/arm/kernel/jump_label.c index 303b3ab87f7e..eb9c24b6e8e2 100644 --- a/arch/arm/kernel/jump_label.c +++ b/arch/arm/kernel/jump_label.c @@ -27,9 +27,3 @@ void arch_jump_label_transform(struct jump_entry *entry, { __arch_jump_label_transform(entry, type, false); } - -void arch_jump_label_transform_static(struct jump_entry *entry, - enum jump_label_type type) -{ - __arch_jump_label_transform(entry, type, true); -} diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c index 6a95b9296640..22f937e6f3ff 100644 --- a/arch/arm/kernel/kgdb.c +++ b/arch/arm/kernel/kgdb.c @@ -154,22 +154,38 @@ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr) return 0; } -static struct undef_hook kgdb_brkpt_hook = { +static struct undef_hook kgdb_brkpt_arm_hook = { .instr_mask = 0xffffffff, .instr_val = KGDB_BREAKINST, - .cpsr_mask = MODE_MASK, + .cpsr_mask = PSR_T_BIT | MODE_MASK, .cpsr_val = SVC_MODE, .fn = kgdb_brk_fn }; -static struct undef_hook kgdb_compiled_brkpt_hook = { +static struct undef_hook kgdb_brkpt_thumb_hook = { + .instr_mask = 0xffff, + .instr_val = KGDB_BREAKINST & 0xffff, + .cpsr_mask = PSR_T_BIT | MODE_MASK, + .cpsr_val = PSR_T_BIT | SVC_MODE, + .fn = kgdb_brk_fn +}; + +static struct undef_hook kgdb_compiled_brkpt_arm_hook = { .instr_mask = 0xffffffff, .instr_val = KGDB_COMPILED_BREAK, - .cpsr_mask = MODE_MASK, + .cpsr_mask = PSR_T_BIT | MODE_MASK, .cpsr_val = SVC_MODE, .fn = kgdb_compiled_brk_fn }; +static struct undef_hook kgdb_compiled_brkpt_thumb_hook = { + .instr_mask = 0xffff, + .instr_val = KGDB_COMPILED_BREAK & 0xffff, + .cpsr_mask = PSR_T_BIT | MODE_MASK, + .cpsr_val = PSR_T_BIT | SVC_MODE, + .fn = kgdb_compiled_brk_fn +}; + static int __kgdb_notify(struct die_args *args, unsigned long cmd) { struct pt_regs *regs = args->regs; @@ -210,8 +226,10 @@ int kgdb_arch_init(void) if (ret != 0) 
return ret; - register_undef_hook(&kgdb_brkpt_hook); - register_undef_hook(&kgdb_compiled_brkpt_hook); + register_undef_hook(&kgdb_brkpt_arm_hook); + register_undef_hook(&kgdb_brkpt_thumb_hook); + register_undef_hook(&kgdb_compiled_brkpt_arm_hook); + register_undef_hook(&kgdb_compiled_brkpt_thumb_hook); return 0; } @@ -224,8 +242,10 @@ int kgdb_arch_init(void) */ void kgdb_arch_exit(void) { - unregister_undef_hook(&kgdb_brkpt_hook); - unregister_undef_hook(&kgdb_compiled_brkpt_hook); + unregister_undef_hook(&kgdb_brkpt_arm_hook); + unregister_undef_hook(&kgdb_brkpt_thumb_hook); + unregister_undef_hook(&kgdb_compiled_brkpt_arm_hook); + unregister_undef_hook(&kgdb_compiled_brkpt_thumb_hook); unregister_die_notifier(&kgdb_notifier); } @@ -236,7 +256,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) /* patch_text() only supports int-sized breakpoints */ BUILD_BUG_ON(sizeof(int) != BREAK_INSTR_SIZE); - err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr, + err = copy_from_kernel_nofault(bpt->saved_instr, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); if (err) return err; diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 76300f3813e8..80ceb5bd2680 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -10,11 +10,10 @@ #include <linux/io.h> #include <linux/irq.h> #include <linux/memblock.h> -#include <asm/pgtable.h> #include <linux/of_fdt.h> -#include <asm/pgalloc.h> #include <asm/mmu_context.h> #include <asm/cacheflush.h> +#include <asm/kexec-internal.h> #include <asm/fncpy.h> #include <asm/mach-types.h> #include <asm/smp_plat.h> @@ -24,11 +23,6 @@ extern void relocate_new_kernel(void); extern const unsigned int relocate_new_kernel_size; -extern unsigned long kexec_start_address; -extern unsigned long kexec_indirection_page; -extern unsigned long kexec_mach_type; -extern unsigned long kexec_boot_atags; - static atomic_t waiting_for_crash_ipi; /* @@ -79,10 +73,12 @@ void machine_kexec_cleanup(struct kimage *image) { } -void machine_crash_nonpanic_core(void *unused) +static void machine_crash_nonpanic_core(void *unused) { struct pt_regs regs; + local_fiq_disable(); + crash_setup_regs(®s, get_irq_regs()); printk(KERN_DEBUG "CPU %u will stop doing anything useful since another CPU has crashed\n", smp_processor_id()); @@ -98,16 +94,28 @@ void machine_crash_nonpanic_core(void *unused) } } +static DEFINE_PER_CPU(call_single_data_t, cpu_stop_csd) = + CSD_INIT(machine_crash_nonpanic_core, NULL); + void crash_smp_send_stop(void) { static int cpus_stopped; unsigned long msecs; + call_single_data_t *csd; + int cpu, this_cpu = raw_smp_processor_id(); if (cpus_stopped) return; atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); - smp_call_function(machine_crash_nonpanic_core, NULL, false); + for_each_online_cpu(cpu) { + if (cpu == this_cpu) + continue; + + csd = &per_cpu(cpu_stop_csd, cpu); + smp_call_function_single_async(cpu, csd); + } + msecs = 1000; /* Wait at most a second for the other cpus to stop */ while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) { mdelay(1); @@ -153,14 +161,10 @@ void machine_crash_shutdown(struct pt_regs *regs) pr_info("Loading crashdump kernel...\n"); } -/* - * Function pointer to optional machine-specific reinitialization - */ -void (*kexec_reinit)(void); - void machine_kexec(struct kimage *image) { unsigned long page_list, reboot_entry_phys; + struct kexec_relocate_data *data; void (*reboot_entry)(void); void *reboot_code_buffer; @@ -176,32 +180,21 @@ void machine_kexec(struct kimage 
*image) reboot_code_buffer = page_address(image->control_code_page); - /* Prepare parameters for reboot_code_buffer*/ - set_kernel_text_rw(); - kexec_start_address = image->start; - kexec_indirection_page = page_list; - kexec_mach_type = machine_arch_type; - kexec_boot_atags = image->arch.kernel_r2; - /* copy our kernel relocation code to the control code page */ reboot_entry = fncpy(reboot_code_buffer, &relocate_new_kernel, relocate_new_kernel_size); + data = reboot_code_buffer + relocate_new_kernel_size; + data->kexec_start_address = image->start; + data->kexec_indirection_page = page_list; + data->kexec_mach_type = machine_arch_type; + data->kexec_r2 = image->arch.kernel_r2; + /* get the identity mapping physical address for the reboot code */ reboot_entry_phys = virt_to_idmap(reboot_entry); pr_info("Bye!\n"); - if (kexec_reinit) - kexec_reinit(); - soft_restart(reboot_entry_phys); } - -void arch_crash_save_vmcoreinfo(void) -{ -#ifdef CONFIG_ARM_LPAE - VMCOREINFO_CONFIG(ARM_LPAE); -#endif -} diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c index 6e626abaefc5..da2ee8d6ef1a 100644 --- a/arch/arm/kernel/module-plts.c +++ b/arch/arm/kernel/module-plts.c @@ -4,6 +4,7 @@ */ #include <linux/elf.h> +#include <linux/ftrace.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/sort.h> @@ -12,10 +13,6 @@ #include <asm/cache.h> #include <asm/opcodes.h> -#define PLT_ENT_STRIDE L1_CACHE_BYTES -#define PLT_ENT_COUNT (PLT_ENT_STRIDE / sizeof(u32)) -#define PLT_ENT_SIZE (sizeof(struct plt_entries) / PLT_ENT_COUNT) - #ifdef CONFIG_THUMB2_KERNEL #define PLT_ENT_LDR __opcode_to_mem_thumb32(0xf8dff000 | \ (PLT_ENT_STRIDE - 4)) @@ -24,24 +21,47 @@ (PLT_ENT_STRIDE - 8)) #endif -struct plt_entries { - u32 ldr[PLT_ENT_COUNT]; - u32 lit[PLT_ENT_COUNT]; +static const u32 fixed_plts[] = { +#ifdef CONFIG_DYNAMIC_FTRACE + FTRACE_ADDR, + MCOUNT_ADDR, +#endif }; -static bool in_init(const struct module *mod, unsigned long loc) +static void prealloc_fixed(struct mod_plt_sec *pltsec, struct plt_entries *plt) { - return loc - (u32)mod->init_layout.base < mod->init_layout.size; + int i; + + if (!ARRAY_SIZE(fixed_plts) || pltsec->plt_count) + return; + pltsec->plt_count = ARRAY_SIZE(fixed_plts); + + for (i = 0; i < ARRAY_SIZE(plt->ldr); ++i) + plt->ldr[i] = PLT_ENT_LDR; + + BUILD_BUG_ON(sizeof(fixed_plts) > sizeof(plt->lit)); + memcpy(plt->lit, fixed_plts, sizeof(fixed_plts)); } u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val) { - struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core : - &mod->arch.init; + struct mod_plt_sec *pltsec = !within_module_init(loc, mod) ? + &mod->arch.core : &mod->arch.init; + struct plt_entries *plt; + int idx; + + /* cache the address, ELF header is available only during module load */ + if (!pltsec->plt_ent) + pltsec->plt_ent = (struct plt_entries *)pltsec->plt->sh_addr; + plt = pltsec->plt_ent; + + prealloc_fixed(pltsec, plt); - struct plt_entries *plt = (struct plt_entries *)pltsec->plt->sh_addr; - int idx = 0; + for (idx = 0; idx < ARRAY_SIZE(fixed_plts); ++idx) + if (plt->lit[idx] == val) + return (u32)&plt->ldr[idx]; + idx = 0; /* * Look for an existing entry pointing to 'val'. Given that the * relocations are sorted, this will be the last entry we allocated. 
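With CONFIG_DYNAMIC_FTRACE, prealloc_fixed() above now seeds the first PLT slots with FTRACE_ADDR and MCOUNT_ADDR so a module's mcount call sites can always reach the ftrace trampolines, and get_module_plt() probes those fixed slots before anything else. Since each veneer is an LDR whose literal sits PLT_ENT_STRIDE bytes away, entry reuse is just a scan of the literal array; a condensed sketch of that lookup (find_plt_entry is a hypothetical name, types and constants as in asm/module.h after this change):

        /* Return an existing veneer whose literal already holds 'val',
         * or 0 when the caller must allocate a fresh entry. */
        static u32 find_plt_entry(struct plt_entries *plt,
                                  unsigned int count, u32 val)
        {
                unsigned int i;

                for (i = 0; i < count; i++)
                        if (plt->lit[i] == val)
                                return (u32)&plt->ldr[i];
                return 0;
        }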
@@ -189,8 +209,8 @@ static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base, int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings, struct module *mod) { - unsigned long core_plts = 0; - unsigned long init_plts = 0; + unsigned long core_plts = ARRAY_SIZE(fixed_plts); + unsigned long init_plts = ARRAY_SIZE(fixed_plts); Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum; Elf32_Sym *syms = NULL; @@ -231,7 +251,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, /* sort by type and symbol index */ sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL); - if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0) + if (!module_init_layout_section(secstrings + dstsec->sh_name)) core_plts += count_plts(syms, dstsec->sh_addr, rels, numrels, s->sh_info); else @@ -245,6 +265,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, mod->arch.core.plt->sh_size = round_up(core_plts * PLT_ENT_SIZE, sizeof(struct plt_entries)); mod->arch.core.plt_count = 0; + mod->arch.core.plt_ent = NULL; mod->arch.init.plt->sh_type = SHT_NOBITS; mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC; @@ -252,8 +273,23 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, mod->arch.init.plt->sh_size = round_up(init_plts * PLT_ENT_SIZE, sizeof(struct plt_entries)); mod->arch.init.plt_count = 0; + mod->arch.init.plt_ent = NULL; pr_debug("%s: plt=%x, init.plt=%x\n", __func__, mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size); return 0; } + +bool in_module_plt(unsigned long loc) +{ + struct module *mod; + bool ret; + + preempt_disable(); + mod = __module_text_address(loc); + ret = mod && (loc - (u32)mod->arch.core.plt_ent < mod->arch.core.plt_count * PLT_ENT_SIZE || + loc - (u32)mod->arch.init.plt_ent < mod->arch.init.plt_count * PLT_ENT_SIZE); + preempt_enable(); + + return ret; +} diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index deef17f34bd2..e74d84f58b77 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -17,7 +17,6 @@ #include <linux/string.h> #include <linux/gfp.h> -#include <asm/pgtable.h> #include <asm/sections.h> #include <asm/smp_plat.h> #include <asm/unwind.h> @@ -55,6 +54,13 @@ void *module_alloc(unsigned long size) } #endif +bool module_init_section(const char *name) +{ + return strstarts(name, ".init") || + strstarts(name, ".ARM.extab.init") || + strstarts(name, ".ARM.exidx.init"); +} + bool module_exit_section(const char *name) { return strstarts(name, ".exit") || @@ -62,6 +68,44 @@ bool module_exit_section(const char *name) strstarts(name, ".ARM.exidx.exit"); } +#ifdef CONFIG_ARM_HAS_GROUP_RELOCS +/* + * This implements the partitioning algorithm for group relocations as + * documented in the ARM AArch32 ELF psABI (IHI 0044). + * + * A single PC-relative symbol reference is divided in up to 3 add or subtract + * operations, where the final one could be incorporated into a load/store + * instruction with immediate offset. E.g., + * + * ADD Rd, PC, #... or ADD Rd, PC, #... + * ADD Rd, Rd, #... ADD Rd, Rd, #... + * LDR Rd, [Rd, #...] ADD Rd, Rd, #... + * + * The latter has a guaranteed range of only 16 MiB (3x8 == 24 bits), so it is + * of limited use in the kernel. However, the ADD/ADD/LDR combo has a range of + * -/+ 256 MiB, (2x8 + 12 == 28 bits), which means it has sufficient range for + * any in-kernel symbol reference (unless module PLTs are being used). 
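To make the partitioning concrete before the comment continues: a user-space rehearsal of the get_group_rem() helper defined just below, splitting a hypothetical offset into the two ADD immediates and the LDR displacement (group_rem is a renamed sketch, not the kernel symbol):

        #include <stdint.h>
        #include <stdio.h>

        /* Same contract as get_group_rem() below: peel 8-bit chunks off
         * the top of *offset, each starting at an even bit position as
         * the ARM rotated-immediate encoding requires, and return the
         * leading-zero count of the remainder. */
        static uint32_t group_rem(uint32_t group, uint32_t *offset)
        {
                uint32_t val = *offset, shift;

                do {
                        shift = val ? __builtin_clz(val) & ~1u : 32;
                        *offset = val;
                        if (!val)
                                break;
                        val &= 0xffffffu >> shift;
                } while (group--);

                return shift;
        }

        int main(void)
        {
                uint32_t offset = 0x01234567; /* hypothetical PC-relative delta */
                uint32_t rem = offset;
                uint32_t shift = group_rem(1, &rem);      /* strip the G0 chunk */
                uint32_t g0 = offset - rem;               /* 0x01200000 */
                uint32_t g1 = (rem >> (24 - shift)) << (24 - shift); /* 0x00034400 */

                /* ADD #g0; ADD #g1; LDR [reg, #(rem - g1)] re-forms the offset */
                printf("G0=%#x G1=%#x imm12=%#x\n", g0, g1, rem - g1);
                return 0;
        }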
+ * + * The main advantage of this approach over the typical pattern using a literal + * load is that literal loads may miss in the D-cache, and generally lead to + * lower cache efficiency for variables that are referenced often from many + * different places in the code. + */ +static u32 get_group_rem(u32 group, u32 *offset) +{ + u32 val = *offset; + u32 shift; + do { + shift = val ? (31 - __fls(val)) & ~1 : 32; + *offset = val; + if (!val) + break; + val &= 0xffffff >> shift; + } while (group--); + return shift; +} +#endif + int apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned int relindex, struct module *module) @@ -76,6 +120,9 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned long loc; Elf32_Sym *sym; const char *symname; +#ifdef CONFIG_ARM_HAS_GROUP_RELOCS + u32 shift, group = 1; +#endif s32 offset; u32 tmp; #ifdef CONFIG_THUMB2_KERNEL @@ -122,8 +169,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, offset = __mem_to_opcode_arm(*(u32 *)loc); offset = (offset & 0x00ffffff) << 2; - if (offset & 0x02000000) - offset -= 0x04000000; + offset = sign_extend32(offset, 25); offset += sym->st_value - loc; @@ -179,14 +225,24 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, *(u32 *)loc |= offset & 0x7fffffff; break; + case R_ARM_REL32: + *(u32 *)loc += sym->st_value - loc; + break; + case R_ARM_MOVW_ABS_NC: case R_ARM_MOVT_ABS: + case R_ARM_MOVW_PREL_NC: + case R_ARM_MOVT_PREL: offset = tmp = __mem_to_opcode_arm(*(u32 *)loc); offset = ((offset & 0xf0000) >> 4) | (offset & 0xfff); - offset = (offset ^ 0x8000) - 0x8000; + offset = sign_extend32(offset, 15); offset += sym->st_value; - if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_ABS) + if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_PREL || + ELF32_R_TYPE(rel->r_info) == R_ARM_MOVW_PREL_NC) + offset -= loc; + if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_ABS || + ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_PREL) offset >>= 16; tmp &= 0xfff0f000; @@ -196,6 +252,55 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, *(u32 *)loc = __opcode_to_mem_arm(tmp); break; +#ifdef CONFIG_ARM_HAS_GROUP_RELOCS + case R_ARM_ALU_PC_G0_NC: + group = 0; + fallthrough; + case R_ARM_ALU_PC_G1_NC: + tmp = __mem_to_opcode_arm(*(u32 *)loc); + offset = ror32(tmp & 0xff, (tmp & 0xf00) >> 7); + if (tmp & BIT(22)) + offset = -offset; + offset += sym->st_value - loc; + if (offset < 0) { + offset = -offset; + tmp = (tmp & ~BIT(23)) | BIT(22); // SUB opcode + } else { + tmp = (tmp & ~BIT(22)) | BIT(23); // ADD opcode + } + + shift = get_group_rem(group, &offset); + if (shift < 24) { + offset >>= 24 - shift; + offset |= (shift + 8) << 7; + } + *(u32 *)loc = __opcode_to_mem_arm((tmp & ~0xfff) | offset); + break; + + case R_ARM_LDR_PC_G2: + tmp = __mem_to_opcode_arm(*(u32 *)loc); + offset = tmp & 0xfff; + if (~tmp & BIT(23)) // U bit cleared? 
+ offset = -offset; + offset += sym->st_value - loc; + if (offset < 0) { + offset = -offset; + tmp &= ~BIT(23); // clear U bit + } else { + tmp |= BIT(23); // set U bit + } + get_group_rem(2, &offset); + + if (offset > 0xfff) { + pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n", + module->name, relindex, i, symname, + ELF32_R_TYPE(rel->r_info), loc, + sym->st_value); + return -ENOEXEC; + } + *(u32 *)loc = __opcode_to_mem_arm((tmp & ~0xfff) | offset); + break; +#endif #ifdef CONFIG_THUMB2_KERNEL case R_ARM_THM_CALL: case R_ARM_THM_JUMP24: @@ -238,8 +343,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, ((~(j2 ^ sign) & 1) << 22) | ((upper & 0x03ff) << 12) | ((lower & 0x07ff) << 1); - if (offset & 0x01000000) - offset -= 0x02000000; + offset = sign_extend32(offset, 24); offset += sym->st_value - loc; /* @@ -277,6 +381,8 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, case R_ARM_THM_MOVW_ABS_NC: case R_ARM_THM_MOVT_ABS: + case R_ARM_THM_MOVW_PREL_NC: + case R_ARM_THM_MOVT_PREL: upper = __mem_to_opcode_thumb16(*(u16 *)loc); lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2)); @@ -293,10 +399,14 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, offset = ((upper & 0x000f) << 12) | ((upper & 0x0400) << 1) | ((lower & 0x7000) >> 4) | (lower & 0x00ff); - offset = (offset ^ 0x8000) - 0x8000; + offset = sign_extend32(offset, 15); offset += sym->st_value; - if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS) + if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_PREL || + ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVW_PREL_NC) + offset -= loc; + if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS || + ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_PREL) offset >>= 16; upper = (u16)((upper & 0xfbf0) | @@ -347,46 +457,40 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, #ifdef CONFIG_ARM_UNWIND const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum; - struct mod_unwind_map maps[ARM_SEC_MAX]; - int i; + struct list_head *unwind_list = &mod->arch.unwind_list; - memset(maps, 0, sizeof(maps)); + INIT_LIST_HEAD(unwind_list); + mod->arch.init_table = NULL; for (s = sechdrs; s < sechdrs_end; s++) { const char *secname = secstrs + s->sh_name; + const char *txtname; + const Elf_Shdr *txt_sec; - if (!(s->sh_flags & SHF_ALLOC)) + if (!(s->sh_flags & SHF_ALLOC) || + s->sh_type != ELF_SECTION_UNWIND) continue; - if (strcmp(".ARM.exidx.init.text", secname) == 0) - maps[ARM_SEC_INIT].unw_sec = s; - else if (strcmp(".ARM.exidx", secname) == 0) - maps[ARM_SEC_CORE].unw_sec = s; - else if (strcmp(".ARM.exidx.exit.text", secname) == 0) - maps[ARM_SEC_EXIT].unw_sec = s; - else if (strcmp(".ARM.exidx.text.unlikely", secname) == 0) - maps[ARM_SEC_UNLIKELY].unw_sec = s; - else if (strcmp(".ARM.exidx.text.hot", secname) == 0) - maps[ARM_SEC_HOT].unw_sec = s; - else if (strcmp(".init.text", secname) == 0) - maps[ARM_SEC_INIT].txt_sec = s; - else if (strcmp(".text", secname) == 0) - maps[ARM_SEC_CORE].txt_sec = s; - else if (strcmp(".exit.text", secname) == 0) - maps[ARM_SEC_EXIT].txt_sec = s; - else if (strcmp(".text.unlikely", secname) == 0) - maps[ARM_SEC_UNLIKELY].txt_sec = s; - else if (strcmp(".text.hot", secname) == 0) - maps[ARM_SEC_HOT].txt_sec = s; - } + if (!strcmp(".ARM.exidx", secname)) + txtname = ".text"; + else + txtname = secname + strlen(".ARM.exidx"); + txt_sec = find_mod_section(hdr, sechdrs, 
txtname); + + if (txt_sec) { + struct unwind_table *table = + unwind_table_add(s->sh_addr, + s->sh_size, + txt_sec->sh_addr, + txt_sec->sh_size); - for (i = 0; i < ARM_SEC_MAX; i++) - if (maps[i].unw_sec && maps[i].txt_sec) - mod->arch.unwind[i] = - unwind_table_add(maps[i].unw_sec->sh_addr, - maps[i].unw_sec->sh_size, - maps[i].txt_sec->sh_addr, - maps[i].txt_sec->sh_size); + list_add(&table->mod_list, unwind_list); + + /* save init table for module_arch_freeing_init */ + if (strcmp(".ARM.exidx.init.text", secname) == 0) + mod->arch.init_table = table; + } + } #endif #ifdef CONFIG_ARM_PATCH_PHYS_VIRT s = find_mod_section(hdr, sechdrs, ".pv_table"); @@ -407,10 +511,27 @@ void module_arch_cleanup(struct module *mod) { #ifdef CONFIG_ARM_UNWIND - int i; + struct unwind_table *tmp; + struct unwind_table *n; - for (i = 0; i < ARM_SEC_MAX; i++) - if (mod->arch.unwind[i]) - unwind_table_del(mod->arch.unwind[i]); + list_for_each_entry_safe(tmp, n, + &mod->arch.unwind_list, mod_list) { + list_del(&tmp->mod_list); + unwind_table_del(tmp); + } + mod->arch.init_table = NULL; +#endif +} + +void __weak module_arch_freeing_init(struct module *mod) +{ +#ifdef CONFIG_ARM_UNWIND + struct unwind_table *init = mod->arch.init_table; + + if (init) { + mod->arch.init_table = NULL; + list_del(&init->mod_list); + unwind_table_del(init); + } #endif } diff --git a/arch/arm/kernel/module.lds b/arch/arm/kernel/module.lds deleted file mode 100644 index 79cb6af565e5..000000000000 --- a/arch/arm/kernel/module.lds +++ /dev/null @@ -1,5 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -SECTIONS { - .plt : { BYTE(0) } - .init.plt : { BYTE(0) } -} diff --git a/arch/arm/kernel/paravirt.c b/arch/arm/kernel/paravirt.c index 4cfed91fe256..7dd9806369fb 100644 --- a/arch/arm/kernel/paravirt.c +++ b/arch/arm/kernel/paravirt.c @@ -9,10 +9,15 @@ #include <linux/export.h> #include <linux/jump_label.h> #include <linux/types.h> +#include <linux/static_call.h> #include <asm/paravirt.h> struct static_key paravirt_steal_enabled; struct static_key paravirt_steal_rq_enabled; -struct paravirt_patch_template pv_ops; -EXPORT_SYMBOL_GPL(pv_ops); +static u64 native_steal_clock(int cpu) +{ + return 0; +} + +DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock); diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c index d0a05a3bdb96..e9e828b6bb30 100644 --- a/arch/arm/kernel/patch.c +++ b/arch/arm/kernel/patch.c @@ -16,10 +16,10 @@ struct patch { unsigned int insn; }; +#ifdef CONFIG_MMU static DEFINE_RAW_SPINLOCK(patch_lock); static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags) - __acquires(&patch_lock) { unsigned int uintaddr = (uintptr_t) addr; bool module = !core_kernel_text(uintaddr); @@ -34,8 +34,6 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags) if (flags) raw_spin_lock_irqsave(&patch_lock, *flags); - else - __acquire(&patch_lock); set_fixmap(fixmap, page_to_phys(page)); @@ -43,15 +41,19 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags) } static void __kprobes patch_unmap(int fixmap, unsigned long *flags) - __releases(&patch_lock) { clear_fixmap(fixmap); if (flags) raw_spin_unlock_irqrestore(&patch_lock, *flags); - else - __release(&patch_lock); } +#else +static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags) +{ + return addr; +} +static void __kprobes patch_unmap(int fixmap, unsigned long *flags) { } +#endif void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap) { @@ -64,8 +66,6 @@ void __kprobes 
__patch_text_real(void *addr, unsigned int insn, bool remap) if (remap) waddr = patch_map(addr, FIX_TEXT_POKE0, &flags); - else - __acquire(&patch_lock); if (thumb2 && __opcode_is_thumb16(insn)) { *(u16 *)waddr = __opcode_to_mem_thumb16(insn); @@ -102,8 +102,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap) if (waddr != addr) { flush_kernel_vmap_range(waddr, twopage ? size / 2 : size); patch_unmap(FIX_TEXT_POKE0, &flags); - } else - __release(&patch_lock); + } flush_icache_range((uintptr_t)(addr), (uintptr_t)(addr) + size); diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c index 3b69a76d341e..7147edbe56c6 100644 --- a/arch/arm/kernel/perf_callchain.c +++ b/arch/arm/kernel/perf_callchain.c @@ -64,11 +64,6 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs { struct frame_tail __user *tail; - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { - /* We don't support guest os callchain now */ - return; - } - perf_callchain_store(entry, regs->ARM_pc); if (!current->mm) @@ -86,13 +81,12 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs * whist unwinding the stackframe and is like a subroutine return so we use * the PC. */ -static int -callchain_trace(struct stackframe *fr, - void *data) +static bool +callchain_trace(void *data, unsigned long pc) { struct perf_callchain_entry_ctx *entry = data; - perf_callchain_store(entry, fr->pc); - return 0; + perf_callchain_store(entry, pc); + return true; } void @@ -100,20 +94,12 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re { struct stackframe fr; - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { - /* We don't support guest os callchain now */ - return; - } - arm_get_current_stackframe(regs, &fr); walk_stackframe(&fr, callchain_trace, entry); } unsigned long perf_instruction_pointer(struct pt_regs *regs) { - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) - return perf_guest_cbs->get_guest_ip(); - return instruction_pointer(regs); } @@ -121,17 +107,10 @@ unsigned long perf_misc_flags(struct pt_regs *regs) { int misc = 0; - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { - if (perf_guest_cbs->is_user_mode()) - misc |= PERF_RECORD_MISC_GUEST_USER; - else - misc |= PERF_RECORD_MISC_GUEST_KERNEL; - } else { - if (user_mode(regs)) - misc |= PERF_RECORD_MISC_USER; - else - misc |= PERF_RECORD_MISC_KERNEL; - } + if (user_mode(regs)) + misc |= PERF_RECORD_MISC_USER; + else + misc |= PERF_RECORD_MISC_KERNEL; return misc; } diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c index 1ae99deeec54..d9fd53841591 100644 --- a/arch/arm/kernel/perf_event_v6.c +++ b/arch/arm/kernel/perf_event_v6.c @@ -113,69 +113,6 @@ static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS, }; -enum armv6mpcore_perf_types { - ARMV6MPCORE_PERFCTR_ICACHE_MISS = 0x0, - ARMV6MPCORE_PERFCTR_IBUF_STALL = 0x1, - ARMV6MPCORE_PERFCTR_DDEP_STALL = 0x2, - ARMV6MPCORE_PERFCTR_ITLB_MISS = 0x3, - ARMV6MPCORE_PERFCTR_DTLB_MISS = 0x4, - ARMV6MPCORE_PERFCTR_BR_EXEC = 0x5, - ARMV6MPCORE_PERFCTR_BR_NOTPREDICT = 0x6, - ARMV6MPCORE_PERFCTR_BR_MISPREDICT = 0x7, - ARMV6MPCORE_PERFCTR_INSTR_EXEC = 0x8, - ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA, - ARMV6MPCORE_PERFCTR_DCACHE_RDMISS = 0xB, - ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC, - ARMV6MPCORE_PERFCTR_DCACHE_WRMISS = 0xD, - ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE, - 
ARMV6MPCORE_PERFCTR_SW_PC_CHANGE = 0xF, - ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS = 0x10, - ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11, - ARMV6MPCORE_PERFCTR_LSU_FULL_STALL = 0x12, - ARMV6MPCORE_PERFCTR_WBUF_DRAINED = 0x13, - ARMV6MPCORE_PERFCTR_CPU_CYCLES = 0xFF, -}; - -/* - * The hardware events that we support. We do support cache operations but - * we have harvard caches and no way to combine instruction and data - * accesses/misses in hardware. - */ -static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = { - PERF_MAP_ALL_UNSUPPORTED, - [PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES, - [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC, - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC, - [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT, - [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6MPCORE_PERFCTR_IBUF_STALL, - [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV6MPCORE_PERFCTR_LSU_FULL_STALL, -}; - -static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_COUNT_HW_CACHE_RESULT_MAX] = { - PERF_CACHE_MAP_ALL_UNSUPPORTED, - - [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS, - [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DCACHE_RDMISS, - [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS, - [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DCACHE_WRMISS, - - [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS, - - /* - * The ARM performance counters can count micro DTLB misses, micro ITLB - * misses and main TLB misses. There isn't an event for TLB misses, so - * use the micro misses here and if users want the main TLB misses they - * can use a raw counter. - */ - [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS, - [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS, - - [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS, - [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS, -}; - static inline unsigned long armv6_pmcr_read(void) { @@ -268,10 +205,8 @@ static inline void armv6pmu_write_counter(struct perf_event *event, u64 value) static void armv6pmu_enable_event(struct perf_event *event) { - unsigned long val, mask, evt, flags; - struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + unsigned long val, mask, evt; struct hw_perf_event *hwc = &event->hw; - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); int idx = hwc->idx; if (ARMV6_CYCLE_COUNTER == idx) { @@ -294,12 +229,10 @@ static void armv6pmu_enable_event(struct perf_event *event) * Mask out the current event and set the counter to count the event * that we're interested in. 
*/ - raw_spin_lock_irqsave(&events->pmu_lock, flags); val = armv6_pmcr_read(); val &= ~mask; val |= evt; armv6_pmcr_write(val); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static irqreturn_t @@ -362,26 +295,20 @@ armv6pmu_handle_irq(struct arm_pmu *cpu_pmu) static void armv6pmu_start(struct arm_pmu *cpu_pmu) { - unsigned long flags, val; - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); + unsigned long val; - raw_spin_lock_irqsave(&events->pmu_lock, flags); val = armv6_pmcr_read(); val |= ARMV6_PMCR_ENABLE; armv6_pmcr_write(val); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void armv6pmu_stop(struct arm_pmu *cpu_pmu) { - unsigned long flags, val; - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); + unsigned long val; - raw_spin_lock_irqsave(&events->pmu_lock, flags); val = armv6_pmcr_read(); val &= ~ARMV6_PMCR_ENABLE; armv6_pmcr_write(val); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static int @@ -419,10 +346,8 @@ static void armv6pmu_clear_event_idx(struct pmu_hw_events *cpuc, static void armv6pmu_disable_event(struct perf_event *event) { - unsigned long val, mask, evt, flags; - struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + unsigned long val, mask, evt; struct hw_perf_event *hwc = &event->hw; - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); int idx = hwc->idx; if (ARMV6_CYCLE_COUNTER == idx) { @@ -444,43 +369,10 @@ static void armv6pmu_disable_event(struct perf_event *event) * of ETM bus signal assertion cycles. The external reporting should * be disabled and so this should never increment. */ - raw_spin_lock_irqsave(&events->pmu_lock, flags); - val = armv6_pmcr_read(); - val &= ~mask; - val |= evt; - armv6_pmcr_write(val); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); -} - -static void armv6mpcore_pmu_disable_event(struct perf_event *event) -{ - unsigned long val, mask, flags, evt = 0; - struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); - struct hw_perf_event *hwc = &event->hw; - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); - int idx = hwc->idx; - - if (ARMV6_CYCLE_COUNTER == idx) { - mask = ARMV6_PMCR_CCOUNT_IEN; - } else if (ARMV6_COUNTER0 == idx) { - mask = ARMV6_PMCR_COUNT0_IEN; - } else if (ARMV6_COUNTER1 == idx) { - mask = ARMV6_PMCR_COUNT1_IEN; - } else { - WARN_ONCE(1, "invalid counter number (%d)\n", idx); - return; - } - - /* - * Unlike UP ARMv6, we don't have a way of stopping the counters. We - * simply disable the interrupt reporting. - */ - raw_spin_lock_irqsave(&events->pmu_lock, flags); val = armv6_pmcr_read(); val &= ~mask; val |= evt; armv6_pmcr_write(val); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static int armv6_map_event(struct perf_event *event) @@ -525,40 +417,7 @@ static int armv6_1176_pmu_init(struct arm_pmu *cpu_pmu) return 0; } -/* - * ARMv6mpcore is almost identical to single core ARMv6 with the exception - * that some of the events have different enumerations and that there is no - * *hack* to stop the programmable counters. To stop the counters we simply - * disable the interrupt reporting and update the event. When unthrottling we - * reset the period and enable the interrupt reporting. 
- */ - -static int armv6mpcore_map_event(struct perf_event *event) -{ - return armpmu_map_event(event, &armv6mpcore_perf_map, - &armv6mpcore_perf_cache_map, 0xFF); -} - -static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu) -{ - cpu_pmu->name = "armv6_11mpcore"; - cpu_pmu->handle_irq = armv6pmu_handle_irq; - cpu_pmu->enable = armv6pmu_enable_event; - cpu_pmu->disable = armv6mpcore_pmu_disable_event; - cpu_pmu->read_counter = armv6pmu_read_counter; - cpu_pmu->write_counter = armv6pmu_write_counter; - cpu_pmu->get_event_idx = armv6pmu_get_event_idx; - cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx; - cpu_pmu->start = armv6pmu_start; - cpu_pmu->stop = armv6pmu_stop; - cpu_pmu->map_event = armv6mpcore_map_event; - cpu_pmu->num_events = 3; - - return 0; -} - static const struct of_device_id armv6_pmu_of_device_ids[] = { - {.compatible = "arm,arm11mpcore-pmu", .data = armv6mpcore_pmu_init}, {.compatible = "arm,arm1176-pmu", .data = armv6_1176_pmu_init}, {.compatible = "arm,arm1136-pmu", .data = armv6_1136_pmu_init}, { /* sentinel value */ } @@ -568,7 +427,6 @@ static const struct pmu_probe_info armv6_pmu_probe_table[] = { ARM_PMU_PROBE(ARM_CPU_PART_ARM1136, armv6_1136_pmu_init), ARM_PMU_PROBE(ARM_CPU_PART_ARM1156, armv6_1156_pmu_init), ARM_PMU_PROBE(ARM_CPU_PART_ARM1176, armv6_1176_pmu_init), - ARM_PMU_PROBE(ARM_CPU_PART_ARM11MPCORE, armv6mpcore_pmu_init), { /* sentinel value */ } }; diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index 2924d7910b10..a3322e2b3ea4 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -773,10 +773,10 @@ static inline void armv7pmu_write_counter(struct perf_event *event, u64 value) pr_err("CPU%u writing wrong counter %d\n", smp_processor_id(), idx); } else if (idx == ARMV7_IDX_CYCLE_COUNTER) { - asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value)); + asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" ((u32)value)); } else { armv7_pmnc_select_counter(idx); - asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value)); + asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" ((u32)value)); } } @@ -870,10 +870,8 @@ static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu) static void armv7pmu_enable_event(struct perf_event *event) { - unsigned long flags; struct hw_perf_event *hwc = &event->hw; struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); int idx = hwc->idx; if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) { @@ -886,7 +884,6 @@ static void armv7pmu_enable_event(struct perf_event *event) * Enable counter and interrupt, and set the counter to count * the event that we're interested in. 
*/ - raw_spin_lock_irqsave(&events->pmu_lock, flags); /* * Disable counter @@ -910,16 +907,12 @@ static void armv7pmu_enable_event(struct perf_event *event) * Enable counter */ armv7_pmnc_enable_counter(idx); - - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void armv7pmu_disable_event(struct perf_event *event) { - unsigned long flags; struct hw_perf_event *hwc = &event->hw; struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); int idx = hwc->idx; if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) { @@ -931,7 +924,6 @@ static void armv7pmu_disable_event(struct perf_event *event) /* * Disable counter and interrupt */ - raw_spin_lock_irqsave(&events->pmu_lock, flags); /* * Disable counter @@ -942,8 +934,6 @@ static void armv7pmu_disable_event(struct perf_event *event) * Disable interrupt for this counter */ armv7_pmnc_disable_intens(idx); - - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu) @@ -1009,24 +999,14 @@ static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu) static void armv7pmu_start(struct arm_pmu *cpu_pmu) { - unsigned long flags; - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); - - raw_spin_lock_irqsave(&events->pmu_lock, flags); /* Enable all counters */ armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void armv7pmu_stop(struct arm_pmu *cpu_pmu) { - unsigned long flags; - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); - - raw_spin_lock_irqsave(&events->pmu_lock, flags); /* Disable all counters */ armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc, @@ -1072,8 +1052,10 @@ static int armv7pmu_set_event_filter(struct hw_perf_event *event, { unsigned long config_base = 0; - if (attr->exclude_idle) - return -EPERM; + if (attr->exclude_idle) { + pr_debug("ARM performance counters do not support mode exclusion\n"); + return -EOPNOTSUPP; + } if (attr->exclude_user) config_base |= ARMV7_EXCLUDE_USER; if (attr->exclude_kernel) @@ -1492,14 +1474,10 @@ static void krait_clearpmu(u32 config_base) static void krait_pmu_disable_event(struct perf_event *event) { - unsigned long flags; struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; - struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); /* Disable counter and interrupt */ - raw_spin_lock_irqsave(&events->pmu_lock, flags); /* Disable counter */ armv7_pmnc_disable_counter(idx); @@ -1512,23 +1490,17 @@ static void krait_pmu_disable_event(struct perf_event *event) /* Disable interrupt for this counter */ armv7_pmnc_disable_intens(idx); - - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void krait_pmu_enable_event(struct perf_event *event) { - unsigned long flags; struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; - struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); /* * Enable counter and interrupt, and set the counter to count * the event that we're interested in. 
*/ - raw_spin_lock_irqsave(&events->pmu_lock, flags); /* Disable counter */ armv7_pmnc_disable_counter(idx); @@ -1548,8 +1520,6 @@ static void krait_pmu_enable_event(struct perf_event *event) /* Enable counter */ armv7_pmnc_enable_counter(idx); - - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void krait_pmu_reset(void *info) @@ -1825,14 +1795,10 @@ static void scorpion_clearpmu(u32 config_base) static void scorpion_pmu_disable_event(struct perf_event *event) { - unsigned long flags; struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; - struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); /* Disable counter and interrupt */ - raw_spin_lock_irqsave(&events->pmu_lock, flags); /* Disable counter */ armv7_pmnc_disable_counter(idx); @@ -1845,23 +1811,17 @@ static void scorpion_pmu_disable_event(struct perf_event *event) /* Disable interrupt for this counter */ armv7_pmnc_disable_intens(idx); - - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void scorpion_pmu_enable_event(struct perf_event *event) { - unsigned long flags; struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; - struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); /* * Enable counter and interrupt, and set the counter to count * the event that we're interested in. */ - raw_spin_lock_irqsave(&events->pmu_lock, flags); /* Disable counter */ armv7_pmnc_disable_counter(idx); @@ -1881,8 +1841,6 @@ static void scorpion_pmu_enable_event(struct perf_event *event) /* Enable counter */ armv7_pmnc_enable_counter(idx); - - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void scorpion_pmu_reset(void *info) diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c index f6cdcacfb96d..7a2ba1c689a7 100644 --- a/arch/arm/kernel/perf_event_xscale.c +++ b/arch/arm/kernel/perf_event_xscale.c @@ -203,10 +203,8 @@ xscale1pmu_handle_irq(struct arm_pmu *cpu_pmu) static void xscale1pmu_enable_event(struct perf_event *event) { - unsigned long val, mask, evt, flags; - struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + unsigned long val, mask, evt; struct hw_perf_event *hwc = &event->hw; - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); int idx = hwc->idx; switch (idx) { @@ -229,20 +227,16 @@ static void xscale1pmu_enable_event(struct perf_event *event) return; } - raw_spin_lock_irqsave(&events->pmu_lock, flags); val = xscale1pmu_read_pmnc(); val &= ~mask; val |= evt; xscale1pmu_write_pmnc(val); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void xscale1pmu_disable_event(struct perf_event *event) { - unsigned long val, mask, evt, flags; - struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + unsigned long val, mask, evt; struct hw_perf_event *hwc = &event->hw; - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); int idx = hwc->idx; switch (idx) { @@ -263,12 +257,10 @@ static void xscale1pmu_disable_event(struct perf_event *event) return; } - raw_spin_lock_irqsave(&events->pmu_lock, flags); val = xscale1pmu_read_pmnc(); val &= ~mask; val |= evt; xscale1pmu_write_pmnc(val); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static int @@ -300,26 +292,20 @@ static void xscalepmu_clear_event_idx(struct pmu_hw_events *cpuc, static void xscale1pmu_start(struct arm_pmu *cpu_pmu) { - unsigned long flags, val; - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); + unsigned long val; 
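The pmu_lock deletions repeated across these PMU drivers share one rationale: every read-modify-write of the control registers runs on the local CPU, either inside the PMU interrupt handler or with interrupts disabled by the perf core, so the lock never excluded a real race. The resulting pattern, sketched with the xscale helpers already in this file (xscale1pmu_set_running is a hypothetical condensation; the v6, v7, krait and scorpion hunks follow the same shape):

        /* Bare read-modify-write of the PMNC, safe because callers are
         * CPU-local with IRQs off; mirrors xscale1pmu_start()/_stop()
         * after this patch. */
        static void xscale1pmu_set_running(bool run)
        {
                unsigned long val = xscale1pmu_read_pmnc();

                if (run)
                        val |= XSCALE_PMU_ENABLE;
                else
                        val &= ~XSCALE_PMU_ENABLE;
                xscale1pmu_write_pmnc(val);
        }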
- raw_spin_lock_irqsave(&events->pmu_lock, flags); val = xscale1pmu_read_pmnc(); val |= XSCALE_PMU_ENABLE; xscale1pmu_write_pmnc(val); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void xscale1pmu_stop(struct arm_pmu *cpu_pmu) { - unsigned long flags, val; - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); + unsigned long val; - raw_spin_lock_irqsave(&events->pmu_lock, flags); val = xscale1pmu_read_pmnc(); val &= ~XSCALE_PMU_ENABLE; xscale1pmu_write_pmnc(val); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static inline u64 xscale1pmu_read_counter(struct perf_event *event) @@ -549,10 +535,8 @@ xscale2pmu_handle_irq(struct arm_pmu *cpu_pmu) static void xscale2pmu_enable_event(struct perf_event *event) { - unsigned long flags, ien, evtsel; - struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + unsigned long ien, evtsel; struct hw_perf_event *hwc = &event->hw; - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); int idx = hwc->idx; ien = xscale2pmu_read_int_enable(); @@ -587,18 +571,14 @@ static void xscale2pmu_enable_event(struct perf_event *event) return; } - raw_spin_lock_irqsave(&events->pmu_lock, flags); xscale2pmu_write_event_select(evtsel); xscale2pmu_write_int_enable(ien); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void xscale2pmu_disable_event(struct perf_event *event) { - unsigned long flags, ien, evtsel, of_flags; - struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); + unsigned long ien, evtsel, of_flags; struct hw_perf_event *hwc = &event->hw; - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); int idx = hwc->idx; ien = xscale2pmu_read_int_enable(); @@ -638,11 +618,9 @@ static void xscale2pmu_disable_event(struct perf_event *event) return; } - raw_spin_lock_irqsave(&events->pmu_lock, flags); xscale2pmu_write_event_select(evtsel); xscale2pmu_write_int_enable(ien); xscale2pmu_write_overflow_flags(of_flags); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static int @@ -663,26 +641,20 @@ out: static void xscale2pmu_start(struct arm_pmu *cpu_pmu) { - unsigned long flags, val; - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); + unsigned long val; - raw_spin_lock_irqsave(&events->pmu_lock, flags); val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64; val |= XSCALE_PMU_ENABLE; xscale2pmu_write_pmnc(val); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void xscale2pmu_stop(struct arm_pmu *cpu_pmu) { - unsigned long flags, val; - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); + unsigned long val; - raw_spin_lock_irqsave(&events->pmu_lock, flags); val = xscale2pmu_read_pmnc(); val &= ~XSCALE_PMU_ENABLE; xscale2pmu_write_pmnc(val); - raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static inline u64 xscale2pmu_read_counter(struct perf_event *event) diff --git a/arch/arm/kernel/perf_regs.c b/arch/arm/kernel/perf_regs.c index 05fe92aa7d98..0529f90395c9 100644 --- a/arch/arm/kernel/perf_regs.c +++ b/arch/arm/kernel/perf_regs.c @@ -32,8 +32,7 @@ u64 perf_reg_abi(struct task_struct *task) } void perf_get_regs_user(struct perf_regs *regs_user, - struct pt_regs *regs, - struct pt_regs *regs_user_copy) + struct pt_regs *regs) { regs_user->regs = task_pt_regs(current); regs_user->abi = perf_reg_abi(current); diff --git a/arch/arm/kernel/phys2virt.S b/arch/arm/kernel/phys2virt.S new file mode 100644 index 000000000000..fb53db78fe78 --- /dev/null +++ b/arch/arm/kernel/phys2virt.S @@ -0,0 +1,238 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* 
+ * Copyright (C) 1994-2002 Russell King + * Copyright (c) 2003, 2020 ARM Limited + * All Rights Reserved + */ + +#include <linux/init.h> +#include <linux/linkage.h> +#include <asm/assembler.h> +#include <asm/page.h> + +#ifdef __ARMEB__ +#define LOW_OFFSET 0x4 +#define HIGH_OFFSET 0x0 +#else +#define LOW_OFFSET 0x0 +#define HIGH_OFFSET 0x4 +#endif + +/* + * __fixup_pv_table - patch the stub instructions with the delta between + * PHYS_OFFSET and PAGE_OFFSET, which is assumed to be + * 2 MiB aligned. + * + * Called from head.S, which expects the following registers to be preserved: + * r1 = machine no, r2 = atags or dtb, + * r8 = phys_offset, r9 = cpuid, r10 = procinfo + */ + __HEAD +ENTRY(__fixup_pv_table) + mov r0, r8, lsr #PAGE_SHIFT @ convert to PFN + str_l r0, __pv_phys_pfn_offset, r3 + + adr_l r0, __pv_offset + subs r3, r8, #PAGE_OFFSET @ PHYS_OFFSET - PAGE_OFFSET + mvn ip, #0 + strcc ip, [r0, #HIGH_OFFSET] @ save to __pv_offset high bits + str r3, [r0, #LOW_OFFSET] @ save to __pv_offset low bits + + mov r0, r3, lsr #21 @ constant for add/sub instructions + teq r3, r0, lsl #21 @ must be 2 MiB aligned + bne 0f + + adr_l r4, __pv_table_begin + adr_l r5, __pv_table_end + b __fixup_a_pv_table + +0: mov r0, r0 @ deadloop on error + b 0b +ENDPROC(__fixup_pv_table) + + .text +__fixup_a_pv_table: + adr_l r6, __pv_offset + ldr r0, [r6, #HIGH_OFFSET] @ pv_offset high word + ldr r6, [r6, #LOW_OFFSET] @ pv_offset low word + cmn r0, #1 +#ifdef CONFIG_THUMB2_KERNEL + @ + @ The Thumb-2 versions of the patchable sequences are + @ + @ phys-to-virt: movw <reg>, #offset<31:21> + @ lsl <reg>, #21 + @ sub <VA>, <PA>, <reg> + @ + @ virt-to-phys (non-LPAE): movw <reg>, #offset<31:21> + @ lsl <reg>, #21 + @ add <PA>, <VA>, <reg> + @ + @ virt-to-phys (LPAE): movw <reg>, #offset<31:21> + @ lsl <reg>, #21 + @ adds <PAlo>, <VA>, <reg> + @ mov <PAhi>, #offset<39:32> + @ adc <PAhi>, <PAhi>, #0 + @ + @ In the non-LPAE case, all patchable instructions are MOVW + @ instructions, where we need to patch in the offset into the + @ second halfword of the opcode (the 16-bit immediate is encoded + @ as imm4:i:imm3:imm8) + @ + @ 15 11 10 9 4 3 0 15 14 12 11 8 7 0 + @ +-----------+---+-------------+------++---+------+----+------+ + @ MOVW | 1 1 1 1 0 | i | 1 0 0 1 0 0 | imm4 || 0 | imm3 | Rd | imm8 | + @ +-----------+---+-------------+------++---+------+----+------+ + @ + @ In the LPAE case, we also need to patch in the high word of the + @ offset into the immediate field of the MOV instruction, or patch it + @ to a MVN instruction if the offset is negative. In this case, we + @ need to inspect the first halfword of the opcode, to check whether + @ it is MOVW or MOV/MVN, and to perform the MOV to MVN patching if + @ needed. 
The encoding of the immediate is rather complex for values + @ of i:imm3 != 0b0000, but fortunately, we never need more than 8 lower + @ order bits, which can be patched into imm8 directly (and i:imm3 + @ cleared) + @ + @ 15 11 10 9 5 0 15 14 12 11 8 7 0 + @ +-----------+---+---------------------++---+------+----+------+ + @ MOV | 1 1 1 1 0 | i | 0 0 0 1 0 0 1 1 1 1 || 0 | imm3 | Rd | imm8 | + @ MVN | 1 1 1 1 0 | i | 0 0 0 1 1 0 1 1 1 1 || 0 | imm3 | Rd | imm8 | + @ +-----------+---+---------------------++---+------+----+------+ + @ + moveq r0, #0x200000 @ set bit 21, mov to mvn instruction + lsrs r3, r6, #29 @ isolate top 3 bits of displacement + ubfx r6, r6, #21, #8 @ put bits 28:21 into the MOVW imm8 field + bfi r6, r3, #12, #3 @ put bits 31:29 into the MOVW imm3 field + b .Lnext +.Lloop: add r7, r4 + adds r4, #4 @ clears Z flag +#ifdef CONFIG_ARM_LPAE + ldrh ip, [r7] +ARM_BE8(rev16 ip, ip) + tst ip, #0x200 @ MOVW has bit 9 set, MVN has it clear + bne 0f @ skip to MOVW handling (Z flag is clear) + bic ip, #0x20 @ clear bit 5 (MVN -> MOV) + orr ip, ip, r0, lsr #16 @ MOV -> MVN if offset < 0 +ARM_BE8(rev16 ip, ip) + strh ip, [r7] + @ Z flag is set +0: +#endif + ldrh ip, [r7, #2] +ARM_BE8(rev16 ip, ip) + and ip, #0xf00 @ clear everything except Rd field + orreq ip, r0 @ Z flag set -> MOV/MVN -> patch in high bits + orrne ip, r6 @ Z flag clear -> MOVW -> patch in low bits +ARM_BE8(rev16 ip, ip) + strh ip, [r7, #2] +#else +#ifdef CONFIG_CPU_ENDIAN_BE8 +@ in BE8, we load data in BE, but instructions still in LE +#define PV_BIT24 0x00000001 +#define PV_IMM8_MASK 0xff000000 +#define PV_IMMR_MSB 0x00080000 +#else +#define PV_BIT24 0x01000000 +#define PV_IMM8_MASK 0x000000ff +#define PV_IMMR_MSB 0x00000800 +#endif + + @ + @ The ARM versions of the patchable sequences are + @ + @ phys-to-virt: sub <VA>, <PA>, #offset<31:24>, lsl #24 + @ sub <VA>, <PA>, #offset<23:16>, lsl #16 + @ + @ virt-to-phys (non-LPAE): add <PA>, <VA>, #offset<31:24>, lsl #24 + @ add <PA>, <VA>, #offset<23:16>, lsl #16 + @ + @ virt-to-phys (LPAE): movw <reg>, #offset<31:20> + @ adds <PAlo>, <VA>, <reg>, lsl #20 + @ mov <PAhi>, #offset<39:32> + @ adc <PAhi>, <PAhi>, #0 + @ + @ In the non-LPAE case, all patchable instructions are ADD or SUB + @ instructions, where we need to patch in the offset into the + @ immediate field of the opcode, which is emitted with the correct + @ rotation value. (The effective value of the immediate is imm12<7:0> + @ rotated right by [2 * imm12<11:8>] bits) + @ + @ 31 28 27 23 22 20 19 16 15 12 11 0 + @ +------+-----------------+------+------+-------+ + @ ADD | cond | 0 0 1 0 1 0 0 0 | Rn | Rd | imm12 | + @ SUB | cond | 0 0 1 0 0 1 0 0 | Rn | Rd | imm12 | + @ MOV | cond | 0 0 1 1 1 0 1 0 | Rn | Rd | imm12 | + @ MVN | cond | 0 0 1 1 1 1 1 0 | Rn | Rd | imm12 | + @ +------+-----------------+------+------+-------+ + @ + @ In the LPAE case, we use a MOVW instruction to carry the low offset + @ word, and patch in the high word of the offset into the immediate + @ field of the subsequent MOV instruction, or patch it to a MVN + @ instruction if the offset is negative. We can distinguish MOVW + @ instructions based on bits 23:22 of the opcode, and ADD/SUB can be + @ distinguished from MOV/MVN (all using the encodings above) using + @ bit 24. 
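For the non-LPAE ARM case the fixup reduces to rewriting the low byte of imm12 while trusting the rotation nibble the compiler emitted: a stub built with "lsl #24" receives offset bits 31-24, one built with "lsl #16" receives bits 23-20 (2 MiB alignment zeroes bits 20-0 of the offset). A little-endian user-space rehearsal, using the PV_* constants defined above (patch_pv_insn is a hypothetical name):

        #include <stdint.h>
        #include <stdio.h>

        /* Patch one ADD/SUB: bit 11 of imm12 (PV_IMMR_MSB) distinguishes
         * the rotate-by-16 stub from the rotate-by-8 one; imm12 bits 7:0
         * (PV_IMM8_MASK) take the offset slice. */
        static uint32_t patch_pv_insn(uint32_t insn, uint32_t pv_offset)
        {
                uint32_t imm8;

                if (insn & 0x00000800)                  /* "lsl #16" stub */
                        imm8 = (pv_offset >> 16) & 0xf0; /* offset bits 23-20 */
                else                                    /* "lsl #24" stub */
                        imm8 = pv_offset >> 24;          /* offset bits 31-24 */

                return (insn & ~0x000000ffu) | imm8;
        }

        int main(void)
        {
                /* "add r0, r0, #0, ror #8" placeholder -> patched to #0x40000000 */
                printf("%#010x\n", patch_pv_insn(0xe2800400, 0x40000000));
                return 0;
        }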
+ @ + @ 31 28 27 23 22 20 19 16 15 12 11 0 + @ +------+-----------------+------+------+-------+ + @ MOVW | cond | 0 0 1 1 0 0 0 0 | imm4 | Rd | imm12 | + @ +------+-----------------+------+------+-------+ + @ + moveq r0, #0x400000 @ set bit 22, mov to mvn instruction + mov r3, r6, lsr #16 @ put offset bits 31-16 into r3 + mov r6, r6, lsr #24 @ put offset bits 31-24 into r6 + and r3, r3, #0xf0 @ only keep offset bits 23-20 in r3 + b .Lnext +.Lloop: ldr ip, [r7, r4] +#ifdef CONFIG_ARM_LPAE + tst ip, #PV_BIT24 @ ADD/SUB have bit 24 clear + beq 1f +ARM_BE8(rev ip, ip) + tst ip, #0xc00000 @ MOVW has bits 23:22 clear + bic ip, ip, #0x400000 @ clear bit 22 + bfc ip, #0, #12 @ clear imm12 field of MOV[W] instruction + orreq ip, ip, r6, lsl #4 @ MOVW -> mask in offset bits 31-24 + orreq ip, ip, r3, lsr #4 @ MOVW -> mask in offset bits 23-20 + orrne ip, ip, r0 @ MOV -> mask in offset bits 7-0 (or bit 22) +ARM_BE8(rev ip, ip) + b 2f +1: +#endif + tst ip, #PV_IMMR_MSB @ rotation value >= 16 ? + bic ip, ip, #PV_IMM8_MASK + orreq ip, ip, r6 ARM_BE8(, lsl #24) @ mask in offset bits 31-24 + orrne ip, ip, r3 ARM_BE8(, lsl #24) @ mask in offset bits 23-20 +2: + str ip, [r7, r4] + add r4, r4, #4 +#endif + +.Lnext: + cmp r4, r5 + ldrcc r7, [r4] @ use branch for delay slot + bcc .Lloop + ret lr +ENDPROC(__fixup_a_pv_table) + +ENTRY(fixup_pv_table) + stmfd sp!, {r4 - r7, lr} + mov r4, r0 @ r0 = table start + add r5, r0, r1 @ r1 = table size + bl __fixup_a_pv_table + ldmfd sp!, {r4 - r7, pc} +ENDPROC(fixup_pv_table) + + .data + .align 2 + .globl __pv_phys_pfn_offset + .type __pv_phys_pfn_offset, %object +__pv_phys_pfn_offset: + .word 0 + .size __pv_phys_pfn_offset, . -__pv_phys_pfn_offset + + .globl __pv_offset + .type __pv_offset, %object +__pv_offset: + .quad 0 + .size __pv_offset, . -__pv_offset diff --git a/arch/arm/kernel/pj4-cp0.c b/arch/arm/kernel/pj4-cp0.c deleted file mode 100644 index 1d1fb22f44f3..000000000000 --- a/arch/arm/kernel/pj4-cp0.c +++ /dev/null @@ -1,134 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/arch/arm/kernel/pj4-cp0.c - * - * PJ4 iWMMXt coprocessor context switching and handling - * - * Copyright (c) 2010 Marvell International Inc. - */ - -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/signal.h> -#include <linux/sched.h> -#include <linux/init.h> -#include <linux/io.h> -#include <asm/thread_notify.h> -#include <asm/cputype.h> - -static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t) -{ - struct thread_info *thread = t; - - switch (cmd) { - case THREAD_NOTIFY_FLUSH: - /* - * flush_thread() zeroes thread->fpstate, so no need - * to do anything here. - * - * FALLTHROUGH: Ensure we don't try to overwrite our newly - * initialised state information on the first fault. 
- */ - - case THREAD_NOTIFY_EXIT: - iwmmxt_task_release(thread); - break; - - case THREAD_NOTIFY_SWITCH: - iwmmxt_task_switch(thread); - break; - } - - return NOTIFY_DONE; -} - -static struct notifier_block __maybe_unused iwmmxt_notifier_block = { - .notifier_call = iwmmxt_do, -}; - - -static u32 __init pj4_cp_access_read(void) -{ - u32 value; - - __asm__ __volatile__ ( - "mrc p15, 0, %0, c1, c0, 2\n\t" - : "=r" (value)); - return value; -} - -static void __init pj4_cp_access_write(u32 value) -{ - u32 temp; - - __asm__ __volatile__ ( - "mcr p15, 0, %1, c1, c0, 2\n\t" -#ifdef CONFIG_THUMB2_KERNEL - "isb\n\t" -#else - "mrc p15, 0, %0, c1, c0, 2\n\t" - "mov %0, %0\n\t" - "sub pc, pc, #4\n\t" -#endif - : "=r" (temp) : "r" (value)); -} - -static int __init pj4_get_iwmmxt_version(void) -{ - u32 cp_access, wcid; - - cp_access = pj4_cp_access_read(); - pj4_cp_access_write(cp_access | 0xf); - - /* check if coprocessor 0 and 1 are available */ - if ((pj4_cp_access_read() & 0xf) != 0xf) { - pj4_cp_access_write(cp_access); - return -ENODEV; - } - - /* read iWMMXt coprocessor id register p1, c0 */ - __asm__ __volatile__ ("mrc p1, 0, %0, c0, c0, 0\n" : "=r" (wcid)); - - pj4_cp_access_write(cp_access); - - /* iWMMXt v1 */ - if ((wcid & 0xffffff00) == 0x56051000) - return 1; - /* iWMMXt v2 */ - if ((wcid & 0xffffff00) == 0x56052000) - return 2; - - return -EINVAL; -} - -/* - * Disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy - * switch code handle iWMMXt context switching. - */ -static int __init pj4_cp0_init(void) -{ - u32 __maybe_unused cp_access; - int vers; - - if (!cpu_is_pj4()) - return 0; - - vers = pj4_get_iwmmxt_version(); - if (vers < 0) - return 0; - -#ifndef CONFIG_IWMMXT - pr_info("PJ4 iWMMXt coprocessor detected, but kernel support is missing.\n"); -#else - cp_access = pj4_cp_access_read() & ~0xf; - pj4_cp_access_write(cp_access); - - pr_info("PJ4 iWMMXt v%d coprocessor enabled.\n", vers); - elf_hwcap |= HWCAP_IWMMXT; - thread_register_notifier(&iwmmxt_notifier_block); -#endif - - return 0; -} - -late_initcall(pj4_cp0_init); diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index cea1c27c29cb..e16ed102960c 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -5,8 +5,6 @@ * Copyright (C) 1996-2000 Russell King - Converted to ARM. 
* Original Copyright (C) 1995 Linus Torvalds */ -#include <stdarg.h> - #include <linux/export.h> #include <linux/sched.h> #include <linux/sched/debug.h> @@ -38,12 +36,21 @@ #include "signal.h" +#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP) +DEFINE_PER_CPU(struct task_struct *, __entry_task); +#endif + #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK) #include <linux/stackprotector.h> unsigned long __stack_chk_guard __read_mostly; EXPORT_SYMBOL(__stack_chk_guard); #endif +#ifndef CONFIG_CURRENT_POINTER_IN_TPIDRURO +asmlinkage struct task_struct *__current; +EXPORT_SYMBOL(__current); +#endif + static const char *processor_modes[] __maybe_unused = { "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" , "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26", @@ -71,7 +78,6 @@ void arch_cpu_idle(void) arm_pm_idle(); else cpu_do_idle(); - local_irq_enable(); } void arch_cpu_idle_prepare(void) @@ -92,12 +98,23 @@ void arch_cpu_idle_exit(void) ledtrig_cpu(CPU_LED_IDLE_END); } +void __show_regs_alloc_free(struct pt_regs *regs) +{ + int i; + + /* check for r0 - r12 only */ + for (i = 0; i < 13; i++) { + pr_alert("Register r%d information:", i); + mem_dump_obj((void *)regs->uregs[i]); + } +} + void __show_regs(struct pt_regs *regs) { unsigned long flags; char buf[64]; #ifndef CONFIG_CPU_V7M - unsigned int domain, fs; + unsigned int domain; #ifdef CONFIG_CPU_SW_DOMAIN_PAN /* * Get the domain register for the parent context. In user @@ -106,14 +123,11 @@ void __show_regs(struct pt_regs *regs) */ if (user_mode(regs)) { domain = DACR_UACCESS_ENABLE; - fs = get_fs(); } else { domain = to_svc_pt_regs(regs)->dacr; - fs = to_svc_pt_regs(regs)->addr_limit; } #else domain = get_domain(); - fs = get_fs(); #endif #endif @@ -149,8 +163,6 @@ void __show_regs(struct pt_regs *regs) if ((domain & domain_mask(DOMAIN_USER)) == domain_val(DOMAIN_USER, DOMAIN_NOACCESS)) segment = "none"; - else if (fs == KERNEL_DS) - segment = "kernel"; else segment = "user"; @@ -188,7 +200,7 @@ void __show_regs(struct pt_regs *regs) void show_regs(struct pt_regs * regs) { __show_regs(regs); - dump_stack(); + dump_backtrace(regs, NULL, KERN_DEFAULT); } ATOMIC_NOTIFIER_HEAD(thread_notify_head); @@ -210,7 +222,6 @@ void flush_thread(void) flush_ptrace_hw_breakpoint(tsk); - memset(thread->used_cp, 0, sizeof(thread->used_cp)); memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); memset(&thread->fpstate, 0, sizeof(union fp_state)); @@ -219,16 +230,13 @@ void flush_thread(void) thread_notify(THREAD_NOTIFY_FLUSH, thread); } -void release_thread(struct task_struct *dead_task) -{ -} - asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); -int -copy_thread(unsigned long clone_flags, unsigned long stack_start, - unsigned long stk_sz, struct task_struct *p) +int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) { + unsigned long clone_flags = args->flags; + unsigned long stack_start = args->stack; + unsigned long tls = args->tls; struct thread_info *thread = task_thread_info(p); struct pt_regs *childregs = task_pt_regs(p); @@ -244,15 +252,15 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start, thread->cpu_domain = get_domain(); #endif - if (likely(!(p->flags & PF_KTHREAD))) { + if (likely(!args->fn)) { *childregs = *current_pt_regs(); childregs->ARM_r0 = 0; if (stack_start) childregs->ARM_sp = stack_start; } else { memset(childregs, 0, sizeof(struct pt_regs)); - 
thread->cpu_context.r4 = stk_sz; - thread->cpu_context.r5 = stack_start; + thread->cpu_context.r4 = (unsigned long)args->fn_arg; + thread->cpu_context.r5 = (unsigned long)args->fn; childregs->ARM_cpsr = SVC_MODE; } thread->cpu_context.pc = (unsigned long)ret_from_fork; @@ -261,49 +269,19 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start, clear_ptrace_hw_breakpoint(p); if (clone_flags & CLONE_SETTLS) - thread->tp_value[0] = childregs->ARM_r3; + thread->tp_value[0] = tls; thread->tp_value[1] = get_tpuser(); thread_notify(THREAD_NOTIFY_COPY, thread); -#ifdef CONFIG_STACKPROTECTOR_PER_TASK - thread->stack_canary = p->stack_canary; -#endif - return 0; } -/* - * Fill in the task's elfregs structure for a core dump. - */ -int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs) -{ - elf_core_copy_regs(elfregs, task_pt_regs(t)); - return 1; -} - -/* - * fill in the fpe structure for a core dump... - */ -int dump_fpu (struct pt_regs *regs, struct user_fp *fp) -{ - struct thread_info *thread = current_thread_info(); - int used_math = thread->used_cp[1] | thread->used_cp[2]; - - if (used_math) - memcpy(fp, &thread->fpstate.soft, sizeof (*fp)); - - return used_math != 0; -} -EXPORT_SYMBOL(dump_fpu); - -unsigned long get_wchan(struct task_struct *p) +unsigned long __get_wchan(struct task_struct *p) { struct stackframe frame; unsigned long stack_page; int count = 0; - if (!p || p == current || p->state == TASK_RUNNING) - return 0; frame.fp = thread_saved_fp(p); frame.sp = thread_saved_sp(p); @@ -336,7 +314,7 @@ static int __init gate_vma_init(void) gate_vma.vm_page_prot = PAGE_READONLY_EXEC; gate_vma.vm_start = 0xffff0000; gate_vma.vm_end = 0xffff0000 + PAGE_SIZE; - gate_vma.vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; + vm_flags_init(&gate_vma, VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC); return 0; } arch_initcall(gate_vma_init); @@ -391,7 +369,7 @@ static unsigned long sigpage_addr(const struct mm_struct *mm, slots = ((last - first) >> PAGE_SHIFT) + 1; - offset = get_random_int() % slots; + offset = get_random_u32_below(slots); addr = first + (offset << PAGE_SHIFT); @@ -431,7 +409,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) npages = 1; /* for sigpage */ npages += vdso_total_pages; - if (down_write_killable(&mm->mmap_sem)) + if (mmap_write_lock_killable(mm)) return -EINTR; hint = sigpage_addr(mm, npages); addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0); @@ -458,7 +436,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) arm_install_vdso(mm, addr + PAGE_SIZE); up_fail: - up_write(&mm->mmap_sem); + mmap_write_unlock(mm); return ret; } #endif diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index b606cded90cd..c421a899fc84 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -22,10 +22,9 @@ #include <linux/hw_breakpoint.h> #include <linux/regset.h> #include <linux/audit.h> -#include <linux/tracehook.h> #include <linux/unistd.h> -#include <asm/pgtable.h> +#include <asm/syscall.h> #include <asm/traps.h> #define CREATE_TRACE_POINTS @@ -219,8 +218,8 @@ static struct undef_hook arm_break_hook = { }; static struct undef_hook thumb_break_hook = { - .instr_mask = 0xffff, - .instr_val = 0xde01, + .instr_mask = 0xffffffff, + .instr_val = 0x0000de01, .cpsr_mask = PSR_T_BIT, .cpsr_val = PSR_T_BIT, .fn = break_trap, @@ -319,32 +318,6 @@ static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp) #endif -#ifdef CONFIG_CRUNCH -/* - * Get the child Crunch 
state. - */ -static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp) -{ - struct thread_info *thread = task_thread_info(tsk); - - crunch_task_disable(thread); /* force it to ram */ - return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE) - ? -EFAULT : 0; -} - -/* - * Set the child Crunch state. - */ -static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp) -{ - struct thread_info *thread = task_thread_info(tsk); - - crunch_task_release(thread); /* force a reload */ - return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE) - ? -EFAULT : 0; -} -#endif - #ifdef CONFIG_HAVE_HW_BREAKPOINT /* * Convert a virtual register number into an index for a thread_info @@ -570,14 +543,9 @@ out: static int gpr_get(struct task_struct *target, const struct user_regset *regset, - unsigned int pos, unsigned int count, - void *kbuf, void __user *ubuf) + struct membuf to) { - struct pt_regs *regs = task_pt_regs(target); - - return user_regset_copyout(&pos, &count, &kbuf, &ubuf, - regs, - 0, sizeof(*regs)); + return membuf_write(&to, task_pt_regs(target), sizeof(struct pt_regs)); } static int gpr_set(struct task_struct *target, @@ -603,12 +571,10 @@ static int gpr_set(struct task_struct *target, static int fpa_get(struct task_struct *target, const struct user_regset *regset, - unsigned int pos, unsigned int count, - void *kbuf, void __user *ubuf) + struct membuf to) { - return user_regset_copyout(&pos, &count, &kbuf, &ubuf, - &task_thread_info(target)->fpstate, - 0, sizeof(struct user_fp)); + return membuf_write(&to, &task_thread_info(target)->fpstate, + sizeof(struct user_fp)); } static int fpa_set(struct task_struct *target, @@ -618,8 +584,6 @@ static int fpa_set(struct task_struct *target, { struct thread_info *thread = task_thread_info(target); - thread->used_cp[1] = thread->used_cp[2] = 1; - return user_regset_copyin(&pos, &count, &kbuf, &ubuf, &thread->fpstate, 0, sizeof(struct user_fp)); @@ -643,41 +607,20 @@ static int fpa_set(struct task_struct *target, * vfp_set() ignores this chunk * * 1 word for the FPSCR - * - * The bounds-checking logic built into user_regset_copyout and friends - * means that we can make a simple sequence of calls to map the relevant data - * to/from the specified slice of the user regset structure. 
*/ static int vfp_get(struct task_struct *target, const struct user_regset *regset, - unsigned int pos, unsigned int count, - void *kbuf, void __user *ubuf) + struct membuf to) { - int ret; struct thread_info *thread = task_thread_info(target); struct vfp_hard_struct const *vfp = &thread->vfpstate.hard; - const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs); const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr); vfp_sync_hwstate(thread); - ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, - &vfp->fpregs, - user_fpregs_offset, - user_fpregs_offset + sizeof(vfp->fpregs)); - if (ret) - return ret; - - ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, - user_fpregs_offset + sizeof(vfp->fpregs), - user_fpscr_offset); - if (ret) - return ret; - - return user_regset_copyout(&pos, &count, &kbuf, &ubuf, - &vfp->fpscr, - user_fpscr_offset, - user_fpscr_offset + sizeof(vfp->fpscr)); + membuf_write(&to, vfp->fpregs, sizeof(vfp->fpregs)); + membuf_zero(&to, user_fpscr_offset - sizeof(vfp->fpregs)); + return membuf_store(&to, vfp->fpscr); } /* @@ -706,11 +649,9 @@ static int vfp_set(struct task_struct *target, if (ret) return ret; - ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, - user_fpregs_offset + sizeof(new_vfp.fpregs), - user_fpscr_offset); - if (ret) - return ret; + user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, + user_fpregs_offset + sizeof(new_vfp.fpregs), + user_fpscr_offset); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &new_vfp.fpscr, @@ -740,7 +681,7 @@ static const struct user_regset arm_regsets[] = { .n = ELF_NGREG, .size = sizeof(u32), .align = sizeof(u32), - .get = gpr_get, + .regset_get = gpr_get, .set = gpr_set }, [REGSET_FPR] = { @@ -752,7 +693,7 @@ static const struct user_regset arm_regsets[] = { .n = sizeof(struct user_fp) / sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), - .get = fpa_get, + .regset_get = fpa_get, .set = fpa_set }, #ifdef CONFIG_VFP @@ -765,7 +706,7 @@ static const struct user_regset arm_regsets[] = { .n = ARM_VFPREGS_SIZE / sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), - .get = vfp_get, + .regset_get = vfp_get, .set = vfp_set }, #endif /* CONFIG_VFP */ @@ -840,20 +781,12 @@ long arch_ptrace(struct task_struct *child, long request, break; case PTRACE_SET_SYSCALL: - task_thread_info(child)->syscall = data; + if (data != -1) + data &= __NR_SYSCALL_MASK; + task_thread_info(child)->abi_syscall = data; ret = 0; break; -#ifdef CONFIG_CRUNCH - case PTRACE_GETCRUNCHREGS: - ret = ptrace_getcrunchregs(child, datap); - break; - - case PTRACE_SETCRUNCHREGS: - ret = ptrace_setcrunchregs(child, datap); - break; -#endif - #ifdef CONFIG_VFP case PTRACE_GETVFPREGS: ret = copy_regset_to_user(child, @@ -894,8 +827,7 @@ enum ptrace_syscall_dir { PTRACE_SYSCALL_EXIT, }; -static void tracehook_report_syscall(struct pt_regs *regs, - enum ptrace_syscall_dir dir) +static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir) { unsigned long ip; @@ -907,19 +839,19 @@ static void tracehook_report_syscall(struct pt_regs *regs, regs->ARM_ip = dir; if (dir == PTRACE_SYSCALL_EXIT) - tracehook_report_syscall_exit(regs, 0); - else if (tracehook_report_syscall_entry(regs)) - current_thread_info()->syscall = -1; + ptrace_report_syscall_exit(regs, 0); + else if (ptrace_report_syscall_entry(regs)) + current_thread_info()->abi_syscall = -1; regs->ARM_ip = ip; } -asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno) +asmlinkage int syscall_trace_enter(struct pt_regs *regs) { - 
current_thread_info()->syscall = scno; + int scno; if (test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER); + report_syscall(regs, PTRACE_SYSCALL_ENTER); /* Do seccomp after ptrace; syscall may have changed. */ #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER @@ -927,11 +859,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno) return -1; #else /* XXX: remove this once OABI gets fixed */ - secure_computing_strict(current_thread_info()->syscall); + secure_computing_strict(syscall_get_nr(current, regs)); #endif /* Tracer or seccomp may have changed syscall. */ - scno = current_thread_info()->syscall; + scno = syscall_get_nr(current, regs); if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) trace_sys_enter(regs, scno); @@ -960,5 +892,5 @@ asmlinkage void syscall_trace_exit(struct pt_regs *regs) trace_sys_exit(regs, regs_return_value(regs)); if (test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT); + report_syscall(regs, PTRACE_SYSCALL_EXIT); } diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c index bb18ed0539f4..3f0d5c3dae11 100644 --- a/arch/arm/kernel/reboot.c +++ b/arch/arm/kernel/reboot.c @@ -10,6 +10,7 @@ #include <asm/cacheflush.h> #include <asm/idmap.h> #include <asm/virt.h> +#include <asm/system_misc.h> #include "reboot.h" @@ -18,7 +19,6 @@ typedef void (*phys_reset_t)(unsigned long, bool); /* * Function pointers to optional machine specific functions */ -void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); @@ -88,11 +88,11 @@ void soft_restart(unsigned long addr) * to execute e.g. a RAM-based pin loop is not sufficient. This allows the * kexec'd kernel to use any and all RAM as it sees fit, without having to * avoid any code or data used by any SW CPU pin loop. The CPU hotplug - * functionality embodied in disable_nonboot_cpus() to achieve this. + * functionality embodied in smp_shutdown_nonboot_cpus() to achieve this. */ void machine_shutdown(void) { - disable_nonboot_cpus(); + smp_shutdown_nonboot_cpus(reboot_cpu); } /* @@ -117,9 +117,7 @@ void machine_power_off(void) { local_irq_disable(); smp_send_stop(); - - if (pm_power_off) - pm_power_off(); + do_kernel_power_off(); } /* @@ -138,10 +136,7 @@ void machine_restart(char *cmd) local_irq_disable(); smp_send_stop(); - if (arm_pm_restart) - arm_pm_restart(reboot_mode, cmd); - else - do_kernel_restart(cmd); + do_kernel_restart(cmd); /* Give a grace period for failure to restart of 1s */ mdelay(1000); diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S index 7eaa2ae7aff5..218d524360fc 100644 --- a/arch/arm/kernel/relocate_kernel.S +++ b/arch/arm/kernel/relocate_kernel.S @@ -5,14 +5,16 @@ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/asm-offsets.h> #include <asm/kexec.h> .align 3 /* not needed for this code, but keeps fncpy() happy */ ENTRY(relocate_new_kernel) - ldr r0,kexec_indirection_page - ldr r1,kexec_start_address + adr r7, relocate_new_kernel_end + ldr r0, [r7, #KEXEC_INDIR_PAGE] + ldr r1, [r7, #KEXEC_START_ADDR] /* * If there is no indirection page (we are doing crashdumps) @@ -25,26 +27,26 @@ ENTRY(relocate_new_kernel) ldr r3, [r0],#4 /* Is it a destination page. Put destination address to r4 */ - tst r3,#1,0 + tst r3,#1 beq 1f bic r4,r3,#1 b 0b 1: /* Is it an indirection page */ - tst r3,#2,0 + tst r3,#2 beq 1f bic r0,r3,#2 b 0b 1: /* are we done ? 
*/ - tst r3,#4,0 + tst r3,#4 beq 1f b 2f 1: /* is it source ? */ - tst r3,#8,0 + tst r3,#8 beq 0b bic r3,r3,#8 mov r6,#1024 @@ -57,34 +59,16 @@ ENTRY(relocate_new_kernel) 2: /* Jump to relocated kernel */ - mov lr,r1 - mov r0,#0 - ldr r1,kexec_mach_type - ldr r2,kexec_boot_atags - ARM( ret lr ) - THUMB( bx lr ) - - .align - - .globl kexec_start_address -kexec_start_address: - .long 0x0 - - .globl kexec_indirection_page -kexec_indirection_page: - .long 0x0 - - .globl kexec_mach_type -kexec_mach_type: - .long 0x0 - - /* phy addr of the atags for the new kernel */ - .globl kexec_boot_atags -kexec_boot_atags: - .long 0x0 + mov lr, r1 + mov r0, #0 + ldr r1, [r7, #KEXEC_MACH_TYPE] + ldr r2, [r7, #KEXEC_R2] + ARM( ret lr ) + THUMB( bx lr ) ENDPROC(relocate_new_kernel) + .align 3 relocate_new_kernel_end: .globl relocate_new_kernel_size diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c index 7b42ac010fdf..ac15db66df4c 100644 --- a/arch/arm/kernel/return_address.c +++ b/arch/arm/kernel/return_address.c @@ -16,17 +16,17 @@ struct return_address_data { void *addr; }; -static int save_return_addr(struct stackframe *frame, void *d) +static bool save_return_addr(void *d, unsigned long pc) { struct return_address_data *data = d; if (!data->level) { - data->addr = (void *)frame->pc; + data->addr = (void *)pc; - return 1; + return false; } else { --data->level; - return 0; + return true; } } @@ -41,7 +41,13 @@ void *return_address(unsigned int level) frame.fp = (unsigned long)__builtin_frame_address(0); frame.sp = current_stack_pointer; frame.lr = (unsigned long)__builtin_return_address(0); - frame.pc = (unsigned long)return_address; +here: + frame.pc = (unsigned long)&&here; +#ifdef CONFIG_KRETPROBES + frame.kr_cur = NULL; + frame.tsk = current; +#endif + frame.ex_frame = false; walk_stackframe(&frame, save_return_addr, &data); diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index d0a464e317ea..7b33b157fca0 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -15,9 +15,10 @@ #include <linux/console.h> #include <linux/seq_file.h> #include <linux/screen_info.h> -#include <linux/of_platform.h> #include <linux/init.h> #include <linux/kexec.h> +#include <linux/libfdt.h> +#include <linux/of.h> #include <linux/of_fdt.h> #include <linux/cpu.h> #include <linux/interrupt.h> @@ -58,6 +59,7 @@ #include <asm/unwind.h> #include <asm/memblock.h> #include <asm/virt.h> +#include <asm/kasan.h> #include "atags.h" @@ -74,13 +76,6 @@ static int __init fpe_setup(char *line) __setup("fpe=", fpe_setup); #endif -extern void init_default_cache_policy(unsigned long); -extern void paging_init(const struct machine_desc *desc); -extern void early_mm_init(const struct machine_desc *); -extern void adjust_lowmem_bounds(void); -extern enum reboot_mode reboot_mode; -extern void setup_dma_zone(const struct machine_desc *desc); - unsigned int processor_id; EXPORT_SYMBOL(processor_id); unsigned int __machine_arch_type __read_mostly; @@ -139,10 +134,10 @@ EXPORT_SYMBOL(outer_cache); int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN; struct stack { - u32 irq[3]; - u32 abt[3]; - u32 und[3]; - u32 fiq[3]; + u32 irq[4]; + u32 abt[4]; + u32 und[4]; + u32 fiq[4]; } ____cacheline_aligned; #ifndef CONFIG_CPU_V7M @@ -448,6 +443,8 @@ static void __init cpuid_init_hwcaps(void) { int block; u32 isar5; + u32 isar6; + u32 pfr2; if (cpu_architecture() < CPU_ARCH_ARMv7) return; @@ -483,6 +480,18 @@ static void __init cpuid_init_hwcaps(void) block = cpuid_feature_extract_field(isar5, 16); if 
(block >= 1) elf_hwcap2 |= HWCAP2_CRC32; + + /* Check for Speculation barrier instruction */ + isar6 = read_cpuid_ext(CPUID_EXT_ISAR6); + block = cpuid_feature_extract_field(isar6, 12); + if (block >= 1) + elf_hwcap2 |= HWCAP2_SB; + + /* Check for Speculative Store Bypassing control */ + pfr2 = read_cpuid_ext(CPUID_EXT_PFR2); + block = cpuid_feature_extract_field(pfr2, 4); + if (block >= 1) + elf_hwcap2 |= HWCAP2_SSBS; } static void __init elf_hwcap_fixup(void) @@ -543,9 +552,11 @@ void notrace cpu_init(void) * In Thumb-2, msr with an immediate value is not allowed. */ #ifdef CONFIG_THUMB2_KERNEL -#define PLC "r" +#define PLC_l "l" +#define PLC_r "r" #else -#define PLC "I" +#define PLC_l "I" +#define PLC_r "I" #endif /* @@ -567,15 +578,15 @@ void notrace cpu_init(void) "msr cpsr_c, %9" : : "r" (stk), - PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE), + PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE), "I" (offsetof(struct stack, irq[0])), - PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE), + PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE), "I" (offsetof(struct stack, abt[0])), - PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE), + PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE), "I" (offsetof(struct stack, und[0])), - PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE), + PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE), "I" (offsetof(struct stack, fiq[0])), - PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE) + PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE) : "r14"); #endif } @@ -763,7 +774,7 @@ int __init arm_add_memory(u64 start, u64 size) #ifndef CONFIG_PHYS_ADDR_T_64BIT if (aligned_start > ULONG_MAX) { pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n", - (long long)start); + start); return -EINVAL; } @@ -843,20 +854,26 @@ early_param("mem", early_mem); static void __init request_standard_resources(const struct machine_desc *mdesc) { - struct memblock_region *region; + phys_addr_t start, end, res_end; struct resource *res; + u64 i; kernel_code.start = virt_to_phys(_text); kernel_code.end = virt_to_phys(__init_begin - 1); kernel_data.start = virt_to_phys(_sdata); kernel_data.end = virt_to_phys(_end - 1); - for_each_memblock(memory, region) { - phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); - phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; + for_each_mem_range(i, &start, &end) { unsigned long boot_alias_start; /* + * In memblock, end points to the first byte after the + * range while in resourses, end points to the last byte in + * the range. + */ + res_end = end - 1; + + /* * Some systems have a special memory alias which is only * used for booting. We need to advertise this region to * kexec-tools so they know where bootable RAM is located. 
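[Editorial sketch] The request_standard_resources() hunk above bridges two end-point conventions: for_each_mem_range() yields half-open [start, end) ranges, while a struct resource records an inclusive last byte, hence the patch's res_end = end - 1. A minimal user-space sketch of just that conversion, with hypothetical addresses and struct names standing in for the kernel's memblock data (not kernel code):

#include <stdio.h>

/* Hypothetical stand-ins: 'end' is exclusive in the range, inclusive in the resource. */
struct mem_range { unsigned long long start, end; };
struct res_like { unsigned long long start, end; };

int main(void)
{
	struct mem_range mem = { 0x80000000ULL, 0xa0000000ULL }; /* 512 MiB of "RAM" */
	struct res_like res;

	res.start = mem.start;
	res.end = mem.end - 1;	/* the same fixup the patch applies as res_end */

	printf("System RAM: 0x%08llx-0x%08llx\n", res.start, res.end);
	return 0;
}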
@@ -869,7 +886,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc) __func__, sizeof(*res)); res->name = "System RAM (boot alias)"; res->start = boot_alias_start; - res->end = phys_to_idmap(end); + res->end = phys_to_idmap(res_end); res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; request_resource(&iomem_resource, res); } @@ -880,7 +897,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc) sizeof(*res)); res->name = "System RAM"; res->start = start; - res->end = end; + res->end = res_end; res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; request_resource(&iomem_resource, res); @@ -911,9 +928,8 @@ static void __init request_standard_resources(const struct machine_desc *mdesc) request_resource(&ioport_resource, &lp2); } -#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \ - defined(CONFIG_EFI) -struct screen_info screen_info = { +#if defined(CONFIG_VGA_CONSOLE) +struct screen_info vgacon_screen_info = { .orig_video_lines = 30, .orig_video_cols = 80, .orig_video_mode = 0, @@ -963,7 +979,7 @@ static int __init init_machine_late(void) } late_initcall(init_machine_late); -#ifdef CONFIG_KEXEC +#ifdef CONFIG_CRASH_RESERVE /* * The crash region must be aligned to 128MB to avoid * zImage relocating below the reserved region. @@ -993,8 +1009,10 @@ static void __init reserve_crashkernel(void) total_mem = get_total_mem(); ret = parse_crashkernel(boot_command_line, total_mem, - &crash_size, &crash_base); - if (ret) + &crash_size, &crash_base, + NULL, NULL); + /* invalid value specified or crashkernel=0 */ + if (ret || !crash_size) return; if (crash_base <= 0) { @@ -1002,31 +1020,25 @@ static void __init reserve_crashkernel(void) unsigned long long lowmem_max = __pa(high_memory - 1) + 1; if (crash_max > lowmem_max) crash_max = lowmem_max; - crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max, - crash_size, CRASH_ALIGN); + + crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN, + CRASH_ALIGN, crash_max); if (!crash_base) { pr_err("crashkernel reservation failed - No suitable area found.\n"); return; } } else { + unsigned long long crash_max = crash_base + crash_size; unsigned long long start; - start = memblock_find_in_range(crash_base, - crash_base + crash_size, - crash_size, SECTION_SIZE); - if (start != crash_base) { + start = memblock_phys_alloc_range(crash_size, SECTION_SIZE, + crash_base, crash_max); + if (!start) { pr_err("crashkernel reservation failed - memory is in use.\n"); return; } } - ret = memblock_reserve(crash_base, crash_size); - if (ret < 0) { - pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n", - (unsigned long)crash_base); - return; - } - pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n", (unsigned long)(crash_size >> 20), (unsigned long)(crash_base >> 20), @@ -1054,7 +1066,7 @@ static void __init reserve_crashkernel(void) } #else static inline void reserve_crashkernel(void) {} -#endif /* CONFIG_KEXEC */ +#endif /* CONFIG_CRASH_RESERVE*/ void __init hyp_mode_check(void) { @@ -1073,21 +1085,43 @@ void __init hyp_mode_check(void) #endif } +static void (*__arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); + +static int arm_restart(struct notifier_block *nb, unsigned long action, + void *data) +{ + __arm_pm_restart(action, data); + return NOTIFY_DONE; +} + +static struct notifier_block arm_restart_nb = { + .notifier_call = arm_restart, + .priority = 128, +}; + void __init setup_arch(char **cmdline_p) { - const struct 
machine_desc *mdesc; + const struct machine_desc *mdesc = NULL; + void *atags_vaddr = NULL; + + if (__atags_pointer) + atags_vaddr = FDT_VIRT_BASE(__atags_pointer); setup_processor(); - mdesc = setup_machine_fdt(__atags_pointer); + if (atags_vaddr) { + mdesc = setup_machine_fdt(atags_vaddr); + if (mdesc) + memblock_reserve(__atags_pointer, + fdt_totalsize(atags_vaddr)); + } if (!mdesc) - mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type); + mdesc = setup_machine_tags(atags_vaddr, __machine_arch_type); if (!mdesc) { early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n"); early_print(" r1=0x%08x, r2=0x%08x\n", __machine_arch_type, __atags_pointer); if (__atags_pointer) - early_print(" r2[]=%*ph\n", 16, - phys_to_virt(__atags_pointer)); + early_print(" r2[]=%*ph\n", 16, atags_vaddr); dump_machine_table(); } @@ -1098,13 +1132,10 @@ void __init setup_arch(char **cmdline_p) if (mdesc->reboot_mode != REBOOT_HARD) reboot_mode = mdesc->reboot_mode; - init_mm.start_code = (unsigned long) _text; - init_mm.end_code = (unsigned long) _etext; - init_mm.end_data = (unsigned long) _edata; - init_mm.brk = (unsigned long) _end; + setup_initial_init_mm(_text, _etext, _edata, _end); /* populate cmd_line too for later use, preserving boot_command_line */ - strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE); + strscpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE); *cmdline_p = cmd_line; early_fixmap_init(); @@ -1117,10 +1148,10 @@ void __init setup_arch(char **cmdline_p) #endif setup_dma_zone(mdesc); xen_early_init(); - efi_init(); + arm_efi_init(); /* * Make sure the calculation for lowmem/highmem is set appropriately - * before reserving/allocating any mmeory + * before reserving/allocating any memory */ adjust_lowmem_bounds(); arm_memblock_init(mdesc); @@ -1130,10 +1161,13 @@ void __init setup_arch(char **cmdline_p) early_ioremap_reset(); paging_init(mdesc); + kasan_init(); request_standard_resources(mdesc); - if (mdesc->restart) - arm_pm_restart = mdesc->restart; + if (mdesc->restart) { + __arm_pm_restart = mdesc->restart; + register_restart_handler(&arm_restart_nb); + } unflatten_device_tree(); @@ -1157,15 +1191,9 @@ void __init setup_arch(char **cmdline_p) reserve_crashkernel(); -#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER - handle_arch_irq = mdesc->handle_irq; -#endif - #ifdef CONFIG_VT #if defined(CONFIG_VGA_CONSOLE) - conswitchp = &vga_con; -#elif defined(CONFIG_DUMMY_CONSOLE) - conswitchp = &dummy_con; + vgacon_register_screen(&vgacon_screen_info); #endif #endif @@ -1224,6 +1252,12 @@ static const char *hwcap_str[] = { "vfpd32", "lpae", "evtstrm", + "fphp", + "asimdhp", + "asimddp", + "asimdfhm", + "asimdbf16", + "i8mm", NULL }; @@ -1233,6 +1267,8 @@ static const char *hwcap2_str[] = { "sha1", "sha2", "crc32", + "sb", + "ssbs", NULL }; diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index ab2568996ddb..79a6730fa0eb 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -9,7 +9,7 @@ #include <linux/signal.h> #include <linux/personality.h> #include <linux/uaccess.h> -#include <linux/tracehook.h> +#include <linux/resume_user_mode.h> #include <linux/uprobes.h> #include <linux/syscalls.h> @@ -18,6 +18,7 @@ #include <asm/traps.h> #include <asm/unistd.h> #include <asm/vfp.h> +#include <asm/syscalls.h> #include "signal.h" @@ -25,40 +26,6 @@ extern const unsigned long sigreturn_codes[17]; static unsigned long signal_return_offset; -#ifdef CONFIG_CRUNCH -static int preserve_crunch_context(struct crunch_sigframe __user *frame) -{ - char 
kbuf[sizeof(*frame) + 8]; - struct crunch_sigframe *kframe; - - /* the crunch context must be 64 bit aligned */ - kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7); - kframe->magic = CRUNCH_MAGIC; - kframe->size = CRUNCH_STORAGE_SIZE; - crunch_task_copy(current_thread_info(), &kframe->storage); - return __copy_to_user(frame, kframe, sizeof(*frame)); -} - -static int restore_crunch_context(char __user **auxp) -{ - struct crunch_sigframe __user *frame = - (struct crunch_sigframe __user *)*auxp; - char kbuf[sizeof(*frame) + 8]; - struct crunch_sigframe *kframe; - - /* the crunch context must be 64 bit aligned */ - kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7); - if (__copy_from_user(kframe, frame, sizeof(*frame))) - return -1; - if (kframe->magic != CRUNCH_MAGIC || - kframe->size != CRUNCH_STORAGE_SIZE) - return -1; - *auxp += CRUNCH_STORAGE_SIZE; - crunch_task_restore(current_thread_info(), &kframe->storage); - return 0; -} -#endif - #ifdef CONFIG_IWMMXT static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame) @@ -205,10 +172,6 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf) err |= !valid_user_regs(regs); aux = (char __user *) sf->uc.uc_regspace; -#ifdef CONFIG_CRUNCH - if (err == 0) - err |= restore_crunch_context(&aux); -#endif #ifdef CONFIG_IWMMXT if (err == 0) err |= restore_iwmmxt_context(&aux); @@ -321,10 +284,6 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set) err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set)); aux = (struct aux_sigframe __user *) sf->uc.uc_regspace; -#ifdef CONFIG_CRUNCH - if (err == 0) - err |= preserve_crunch_context(&aux->crunch); -#endif #ifdef CONFIG_IWMMXT if (err == 0) err |= preserve_iwmmxt_context(&aux->iwmmxt); @@ -596,7 +555,7 @@ static int do_signal(struct pt_regs *regs, int syscall) switch (retval) { case -ERESTART_RESTARTBLOCK: restart -= 2; - /* Fall through */ + fallthrough; case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: @@ -655,7 +614,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) if (unlikely(!user_mode(regs))) return 0; local_irq_enable(); - if (thread_flags & _TIF_SIGPENDING) { + if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) { int restart = do_signal(regs, syscall); if (unlikely(restart)) { /* @@ -669,13 +628,11 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) } else if (thread_flags & _TIF_UPROBE) { uprobe_notify_resume(regs); } else { - clear_thread_flag(TIF_NOTIFY_RESUME); - tracehook_notify_resume(regs); - rseq_handle_notify_resume(NULL, regs); + resume_user_mode_work(regs); } } local_irq_disable(); - thread_flags = current_thread_info()->flags; + thread_flags = read_thread_flags(); } while (thread_flags & _TIF_WORK_MASK); return 0; } @@ -694,31 +651,67 @@ struct page *get_signal_page(void) addr = page_address(page); + /* Poison the entire page */ + memset32(addr, __opcode_to_mem_arm(0xe7fddef1), + PAGE_SIZE / sizeof(u32)); + /* Give the signal return code some randomness */ - offset = 0x200 + (get_random_int() & 0x7fc); + offset = 0x200 + (get_random_u16() & 0x7fc); signal_return_offset = offset; - /* - * Copy signal return handlers into the vector page, and - * set sigreturn to be a pointer to these. 
- */ + /* Copy signal return handlers into the page */ memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes)); - ptr = (unsigned long)addr + offset; - flush_icache_range(ptr, ptr + sizeof(sigreturn_codes)); + /* Flush out all instructions in this page */ + ptr = (unsigned long)addr; + flush_icache_range(ptr, ptr + PAGE_SIZE); return page; } -/* Defer to generic check */ -asmlinkage void addr_limit_check_failed(void) -{ - addr_limit_user_check(); -} - #ifdef CONFIG_DEBUG_RSEQ asmlinkage void do_rseq_syscall(struct pt_regs *regs) { rseq_syscall(regs); } #endif + +/* + * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as + * changes likely come with new fields that should be added below. + */ +static_assert(NSIGILL == 11); +static_assert(NSIGFPE == 15); +static_assert(NSIGSEGV == 10); +static_assert(NSIGBUS == 5); +static_assert(NSIGTRAP == 6); +static_assert(NSIGCHLD == 6); +static_assert(NSIGSYS == 2); +static_assert(sizeof(siginfo_t) == 128); +static_assert(__alignof__(siginfo_t) == 4); +static_assert(offsetof(siginfo_t, si_signo) == 0x00); +static_assert(offsetof(siginfo_t, si_errno) == 0x04); +static_assert(offsetof(siginfo_t, si_code) == 0x08); +static_assert(offsetof(siginfo_t, si_pid) == 0x0c); +static_assert(offsetof(siginfo_t, si_uid) == 0x10); +static_assert(offsetof(siginfo_t, si_tid) == 0x0c); +static_assert(offsetof(siginfo_t, si_overrun) == 0x10); +static_assert(offsetof(siginfo_t, si_status) == 0x14); +static_assert(offsetof(siginfo_t, si_utime) == 0x18); +static_assert(offsetof(siginfo_t, si_stime) == 0x1c); +static_assert(offsetof(siginfo_t, si_value) == 0x14); +static_assert(offsetof(siginfo_t, si_int) == 0x14); +static_assert(offsetof(siginfo_t, si_ptr) == 0x14); +static_assert(offsetof(siginfo_t, si_addr) == 0x0c); +static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x10); +static_assert(offsetof(siginfo_t, si_lower) == 0x14); +static_assert(offsetof(siginfo_t, si_upper) == 0x18); +static_assert(offsetof(siginfo_t, si_pkey) == 0x14); +static_assert(offsetof(siginfo_t, si_perf_data) == 0x10); +static_assert(offsetof(siginfo_t, si_perf_type) == 0x14); +static_assert(offsetof(siginfo_t, si_perf_flags) == 0x18); +static_assert(offsetof(siginfo_t, si_band) == 0x0c); +static_assert(offsetof(siginfo_t, si_fd) == 0x10); +static_assert(offsetof(siginfo_t, si_call_addr) == 0x0c); +static_assert(offsetof(siginfo_t, si_syscall) == 0x10); +static_assert(offsetof(siginfo_t, si_arch) == 0x14); diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S index 5dc8b80bb693..93afd1005b43 100644 --- a/arch/arm/kernel/sleep.S +++ b/arch/arm/kernel/sleep.S @@ -67,13 +67,20 @@ ENTRY(__cpu_suspend) ldr r4, =cpu_suspend_size #endif mov r5, sp @ current virtual SP +#ifdef CONFIG_VMAP_STACK + @ Run the suspend code from the overflow stack so we don't have to rely + @ on vmalloc-to-phys conversions anywhere in the arch suspend code. + @ The original SP value captured in R5 will be restored on the way out. 
+ ldr_this_cpu sp, overflow_stack_ptr, r6, r7 +#endif add r4, r4, #12 @ Space for pgd, virt sp, phys resume fn sub sp, sp, r4 @ allocate CPU state on stack ldr r3, =sleep_save_sp stmfd sp!, {r0, r1} @ save suspend func arg and pointer ldr r3, [r3, #SLEEP_SAVE_SP_VIRT] - ALT_SMP(ldr r0, =mpidr_hash) + ALT_SMP(W(nop)) @ don't use adr_l inside ALT_SMP() ALT_UP_B(1f) + adr_l r0, mpidr_hash /* This ldmia relies on the memory layout of the mpidr_hash struct */ ldmia r0, {r1, r6-r8} @ r1 = mpidr mask (r6,r7,r8) = l[0,1,2] shifts compute_mpidr_hash r0, r6, r7, r8, r2, r1 @@ -112,7 +119,18 @@ ENTRY(cpu_resume_mmu) ENDPROC(cpu_resume_mmu) .popsection cpu_resume_after_mmu: +#if defined(CONFIG_VMAP_STACK) && !defined(CONFIG_ARM_LPAE) + @ Before using the vmap'ed stack, we have to switch to swapper_pg_dir + @ as the ID map does not cover the vmalloc region. + mrc p15, 0, ip, c2, c0, 1 @ read TTBR1 + mcr p15, 0, ip, c2, c0, 0 @ set TTBR0 + instr_sync +#endif bl cpu_init @ restore the und/abt/irq banked regs +#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK) + mov r0, sp + bl kasan_unpoison_task_stack_below +#endif mov r0, #0 @ return zero on success ldmfd sp!, {r4 - r11, pc} ENDPROC(cpu_resume_after_mmu) @@ -147,9 +165,8 @@ no_hyp: mov r1, #0 ALT_SMP(mrc p15, 0, r0, c0, c0, 5) ALT_UP_B(1f) - adr r2, mpidr_hash_ptr - ldr r3, [r2] - add r2, r2, r3 @ r2 = struct mpidr_hash phys address + adr_l r2, mpidr_hash @ r2 = struct mpidr_hash phys address + /* * This ldmia relies on the memory layout of the mpidr_hash * struct mpidr_hash. @@ -157,10 +174,7 @@ no_hyp: ldmia r2, { r3-r6 } @ r3 = mpidr mask (r4,r5,r6) = l[0,1,2] shifts compute_mpidr_hash r1, r4, r5, r6, r0, r3 1: - adr r0, _sleep_save_sp - ldr r2, [r0] - add r0, r0, r2 - ldr r0, [r0, #SLEEP_SAVE_SP_PHYS] + ldr_l r0, sleep_save_sp + SLEEP_SAVE_SP_PHYS ldr r0, [r0, r1, lsl #2] @ load phys pgd, stack, resume fn @@ -177,12 +191,6 @@ ENDPROC(cpu_resume_arm) ENDPROC(cpu_resume_no_hyp) #endif - .align 2 -_sleep_save_sp: - .long sleep_save_sp - . -mpidr_hash_ptr: - .long mpidr_hash - . 
@ mpidr_hash struct offset - .data .align 2 .type sleep_save_sp, #object diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S index 00664c78faca..931df62a7831 100644 --- a/arch/arm/kernel/smccc-call.S +++ b/arch/arm/kernel/smccc-call.S @@ -3,7 +3,9 @@ * Copyright (c) 2015, Linaro Limited */ #include <linux/linkage.h> +#include <linux/arm-smccc.h> +#include <asm/asm-offsets.h> #include <asm/opcodes-sec.h> #include <asm/opcodes-virt.h> #include <asm/unwind.h> @@ -27,7 +29,14 @@ UNWIND( .fnstart) UNWIND( .save {r4-r7}) ldm r12, {r4-r7} \instr - pop {r4-r7} + ldr r4, [sp, #36] + cmp r4, #0 + beq 1f // No quirk structure + ldr r5, [r4, #ARM_SMCCC_QUIRK_ID_OFFS] + cmp r5, #ARM_SMCCC_QUIRK_QCOM_A6 + bne 1f // No quirk present + str r6, [r4, #ARM_SMCCC_QUIRK_STATE_OFFS] +1: pop {r4-r7} ldr r12, [sp, #(4 * 4)] stm r12, {r0-r3} bx lr diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 46e1be9e57a8..3431c0553f45 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -26,6 +26,7 @@ #include <linux/completion.h> #include <linux/cpufreq.h> #include <linux/irq_work.h> +#include <linux/kernel_stat.h> #include <linux/atomic.h> #include <asm/bugs.h> @@ -37,8 +38,6 @@ #include <asm/idmap.h> #include <asm/topology.h> #include <asm/mmu_context.h> -#include <asm/pgtable.h> -#include <asm/pgalloc.h> #include <asm/procinfo.h> #include <asm/processor.h> #include <asm/sections.h> @@ -49,7 +48,6 @@ #include <asm/mach/arch.h> #include <asm/mpu.h> -#define CREATE_TRACE_POINTS #include <trace/events/ipi.h> /* @@ -67,18 +65,26 @@ enum ipi_msg_type { IPI_CPU_STOP, IPI_IRQ_WORK, IPI_COMPLETION, + NR_IPI, /* * CPU_BACKTRACE is special and not included in NR_IPI * or tracable with trace_ipi_* */ - IPI_CPU_BACKTRACE, + IPI_CPU_BACKTRACE = NR_IPI, /* * SGI8-15 can be reserved by secure firmware, and thus may * not be usable by the kernel. Please keep the above limited * to at most 8 entries. */ + MAX_IPI }; +static int ipi_irq_base __read_mostly; +static int nr_ipi __read_mostly = NR_IPI; +static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly; + +static void ipi_setup(int cpu); + static DECLARE_COMPLETION(cpu_running); static struct smp_operations smp_ops __ro_after_init; @@ -146,6 +152,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) secondary_data.pgdir = virt_to_phys(idmap_pgd); secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir); #endif + secondary_data.task = idle; sync_cache_w(&secondary_data); /* @@ -228,6 +235,17 @@ int platform_can_hotplug_cpu(unsigned int cpu) return cpu != 0; } +static void ipi_teardown(int cpu) +{ + int i; + + if (WARN_ON_ONCE(!ipi_irq_base)) + return; + + for (i = 0; i < nr_ipi; i++) + disable_percpu_irq(ipi_irq_base + i); +} + /* * __cpu_disable runs on the processor to be shutdown. */ @@ -249,6 +267,7 @@ int __cpu_disable(void) * and we must not schedule until we're ready to give up the cpu. */ set_cpu_online(cpu, false); + ipi_teardown(cpu); /* * OK - migrate IRQs away from this CPU @@ -269,15 +288,11 @@ int __cpu_disable(void) } /* - * called on the thread which is asking for a CPU to be shutdown - - * waits until shutdown has completed, or it is timed out. + * called on the thread which is asking for a CPU to be shutdown after the + * shutdown completed. 
*/ -void __cpu_die(unsigned int cpu) +void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) { - if (!cpu_wait_death(cpu, 5)) { - pr_err("CPU%u: cpu didn't die\n", cpu); - return; - } pr_debug("CPU%u: shutdown\n", cpu); clear_tasks_mm_cpumask(cpu); @@ -300,7 +315,7 @@ void __cpu_die(unsigned int cpu) * of the other hotplug-cpu capable cores, so presumably coming * out of idle fixes this. */ -void arch_cpu_idle_dead(void) +void __noreturn arch_cpu_idle_dead(void) { unsigned int cpu = smp_processor_id(); @@ -317,11 +332,11 @@ void arch_cpu_idle_dead(void) flush_cache_louis(); /* - * Tell __cpu_die() that this CPU is now safe to dispose of. Once - * this returns, power and/or clocks can be removed at any point - * from this CPU and its cache by platform_cpu_kill(). + * Tell cpuhp_bp_sync_dead() that this CPU is now safe to dispose + * of. Once this returns, power and/or clocks can be removed at + * any point from this CPU and its cache by platform_cpu_kill(). */ - (void)cpu_report_death(); + cpuhp_ap_report_dead(); /* * Ensure that the cache lines associated with that completion are @@ -356,9 +371,14 @@ void arch_cpu_idle_dead(void) */ __asm__("mov sp, %0\n" " mov fp, #0\n" + " mov r0, %1\n" " b secondary_start_kernel" : - : "r" (task_stack_page(current) + THREAD_SIZE - 8)); + : "r" (task_stack_page(current) + THREAD_SIZE - 8), + "r" (current) + : "r0"); + + unreachable(); } #endif /* CONFIG_HOTPLUG_CPU */ @@ -377,15 +397,23 @@ static void smp_store_cpu_info(unsigned int cpuid) check_cpu_icache_size(cpuid); } +static void set_current(struct task_struct *cur) +{ + /* Set TPIDRURO */ + asm("mcr p15, 0, %0, c13, c0, 3" :: "r"(cur) : "memory"); +} + /* * This is the secondary CPU boot entry. We're using this CPUs * idle thread stack, but a set of temporary page tables. 
*/ -asmlinkage void secondary_start_kernel(void) +asmlinkage void secondary_start_kernel(struct task_struct *task) { struct mm_struct *mm = &init_mm; unsigned int cpu; + set_current(task); + secondary_biglittle_init(); /* @@ -413,7 +441,6 @@ asmlinkage void secondary_start_kernel(void) #endif pr_debug("CPU%u: Booted secondary processor\n", cpu); - preempt_disable(); trace_hardirqs_off(); /* @@ -424,6 +451,8 @@ asmlinkage void secondary_start_kernel(void) notify_cpu_starting(cpu); + ipi_setup(cpu); + calibrate_delay(); smp_store_cpu_info(cpu); @@ -502,57 +531,35 @@ void __init smp_prepare_cpus(unsigned int max_cpus) } } -static void (*__smp_cross_call)(const struct cpumask *, unsigned int); - -void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int)) -{ - if (!__smp_cross_call) - __smp_cross_call = fn; -} - static const char *ipi_types[NR_IPI] __tracepoint_string = { -#define S(x,s) [x] = s - S(IPI_WAKEUP, "CPU wakeup interrupts"), - S(IPI_TIMER, "Timer broadcast interrupts"), - S(IPI_RESCHEDULE, "Rescheduling interrupts"), - S(IPI_CALL_FUNC, "Function call interrupts"), - S(IPI_CPU_STOP, "CPU stop interrupts"), - S(IPI_IRQ_WORK, "IRQ work interrupts"), - S(IPI_COMPLETION, "completion interrupts"), + [IPI_WAKEUP] = "CPU wakeup interrupts", + [IPI_TIMER] = "Timer broadcast interrupts", + [IPI_RESCHEDULE] = "Rescheduling interrupts", + [IPI_CALL_FUNC] = "Function call interrupts", + [IPI_CPU_STOP] = "CPU stop interrupts", + [IPI_IRQ_WORK] = "IRQ work interrupts", + [IPI_COMPLETION] = "completion interrupts", }; -static void smp_cross_call(const struct cpumask *target, unsigned int ipinr) -{ - trace_ipi_raise_rcuidle(target, ipi_types[ipinr]); - __smp_cross_call(target, ipinr); -} +static void smp_cross_call(const struct cpumask *target, unsigned int ipinr); void show_ipi_list(struct seq_file *p, int prec) { unsigned int cpu, i; for (i = 0; i < NR_IPI; i++) { + if (!ipi_desc[i]) + continue; + seq_printf(p, "%*s%u: ", prec - 1, "IPI", i); for_each_online_cpu(cpu) - seq_printf(p, "%10u ", - __get_irq_stat(cpu, ipi_irqs[i])); + seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu)); seq_printf(p, " %s\n", ipi_types[i]); } } -u64 smp_irq_stat_cpu(unsigned int cpu) -{ - u64 sum = 0; - int i; - - for (i = 0; i < NR_IPI; i++) - sum += __get_irq_stat(cpu, ipi_irqs[i]); - - return sum; -} - void arch_send_call_function_ipi_mask(const struct cpumask *mask) { smp_cross_call(mask, IPI_CALL_FUNC); @@ -590,6 +597,8 @@ static DEFINE_RAW_SPINLOCK(stop_lock); */ static void ipi_cpu_stop(unsigned int cpu) { + local_fiq_disable(); + if (system_state <= SYSTEM_RUNNING) { raw_spin_lock(&stop_lock); pr_crit("CPU%u: stopping\n", cpu); @@ -599,9 +608,6 @@ static void ipi_cpu_stop(unsigned int cpu) set_cpu_online(cpu, false); - local_fiq_disable(); - local_irq_disable(); - while (1) { cpu_relax(); wfe(); @@ -624,20 +630,12 @@ static void ipi_complete(unsigned int cpu) /* * Main handler for inter-processor interrupts */ -asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs) -{ - handle_IPI(ipinr, regs); -} - -void handle_IPI(int ipinr, struct pt_regs *regs) +static void do_handle_IPI(int ipinr) { unsigned int cpu = smp_processor_id(); - struct pt_regs *old_regs = set_irq_regs(regs); - if ((unsigned)ipinr < NR_IPI) { - trace_ipi_entry_rcuidle(ipi_types[ipinr]); - __inc_irq_stat(cpu, ipi_irqs[ipinr]); - } + if ((unsigned)ipinr < NR_IPI) + trace_ipi_entry(ipi_types[ipinr]); switch (ipinr) { case IPI_WAKEUP: @@ -645,9 +643,7 @@ void handle_IPI(int ipinr, struct pt_regs 
*regs) #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST case IPI_TIMER: - irq_enter(); tick_receive_broadcast(); - irq_exit(); break; #endif @@ -656,37 +652,27 @@ void handle_IPI(int ipinr, struct pt_regs *regs) break; case IPI_CALL_FUNC: - irq_enter(); generic_smp_call_function_interrupt(); - irq_exit(); break; case IPI_CPU_STOP: - irq_enter(); ipi_cpu_stop(cpu); - irq_exit(); break; #ifdef CONFIG_IRQ_WORK case IPI_IRQ_WORK: - irq_enter(); irq_work_run(); - irq_exit(); break; #endif case IPI_COMPLETION: - irq_enter(); ipi_complete(cpu); - irq_exit(); break; case IPI_CPU_BACKTRACE: - printk_nmi_enter(); - irq_enter(); - nmi_cpu_backtrace(regs); - irq_exit(); - printk_nmi_exit(); + printk_deferred_enter(); + nmi_cpu_backtrace(get_irq_regs()); + printk_deferred_exit(); break; default: @@ -696,11 +682,69 @@ void handle_IPI(int ipinr, struct pt_regs *regs) } if ((unsigned)ipinr < NR_IPI) - trace_ipi_exit_rcuidle(ipi_types[ipinr]); + trace_ipi_exit(ipi_types[ipinr]); +} + +/* Legacy version, should go away once all irqchips have been converted */ +void handle_IPI(int ipinr, struct pt_regs *regs) +{ + struct pt_regs *old_regs = set_irq_regs(regs); + + irq_enter(); + do_handle_IPI(ipinr); + irq_exit(); + set_irq_regs(old_regs); } -void smp_send_reschedule(int cpu) +static irqreturn_t ipi_handler(int irq, void *data) +{ + do_handle_IPI(irq - ipi_irq_base); + return IRQ_HANDLED; +} + +static void smp_cross_call(const struct cpumask *target, unsigned int ipinr) +{ + trace_ipi_raise(target, ipi_types[ipinr]); + __ipi_send_mask(ipi_desc[ipinr], target); +} + +static void ipi_setup(int cpu) +{ + int i; + + if (WARN_ON_ONCE(!ipi_irq_base)) + return; + + for (i = 0; i < nr_ipi; i++) + enable_percpu_irq(ipi_irq_base + i, 0); +} + +void __init set_smp_ipi_range(int ipi_base, int n) +{ + int i; + + WARN_ON(n < MAX_IPI); + nr_ipi = min(n, MAX_IPI); + + for (i = 0; i < nr_ipi; i++) { + int err; + + err = request_percpu_irq(ipi_base + i, ipi_handler, + "IPI", &irq_stat); + WARN_ON(err); + + ipi_desc[i] = irq_to_desc(ipi_base + i); + irq_set_status_flags(ipi_base + i, IRQ_HIDDEN); + } + + ipi_irq_base = ipi_base; + + /* Setup the boot CPU immediately */ + ipi_setup(smp_processor_id()); +} + +void arch_smp_send_reschedule(int cpu) { smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); } @@ -730,7 +774,7 @@ void smp_send_stop(void) * kdump fails. So split out the panic_smp_self_stop() and add * set_cpu_online(smp_processor_id(), false). 
*/ -void panic_smp_self_stop(void) +void __noreturn panic_smp_self_stop(void) { pr_debug("CPU %u will stop doing anything useful since another CPU has paniced\n", smp_processor_id()); @@ -739,14 +783,6 @@ void panic_smp_self_stop(void) cpu_relax(); } -/* - * not supported here - */ -int setup_profiling_timer(unsigned int multiplier) -{ - return -EINVAL; -} - #ifdef CONFIG_CPU_FREQ static DEFINE_PER_CPU(unsigned long, l_p_j_ref); @@ -807,10 +843,10 @@ core_initcall(register_cpufreq_notifier); static void raise_nmi(cpumask_t *mask) { - __smp_cross_call(mask, IPI_CPU_BACKTRACE); + __ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask); } -void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) +void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu) { - nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_nmi); + nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_nmi); } diff --git a/arch/arm/kernel/spectre.c b/arch/arm/kernel/spectre.c new file mode 100644 index 000000000000..0dcefc36fb7a --- /dev/null +++ b/arch/arm/kernel/spectre.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <linux/bpf.h> +#include <linux/cpu.h> +#include <linux/device.h> + +#include <asm/spectre.h> + +static bool _unprivileged_ebpf_enabled(void) +{ +#ifdef CONFIG_BPF_SYSCALL + return !sysctl_unprivileged_bpf_disabled; +#else + return false; +#endif +} + +ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "Mitigation: __user pointer sanitization\n"); +} + +static unsigned int spectre_v2_state; +static unsigned int spectre_v2_methods; + +void spectre_v2_update_state(unsigned int state, unsigned int method) +{ + if (state > spectre_v2_state) + spectre_v2_state = state; + spectre_v2_methods |= method; +} + +ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, + char *buf) +{ + const char *method; + + if (spectre_v2_state == SPECTRE_UNAFFECTED) + return sprintf(buf, "%s\n", "Not affected"); + + if (spectre_v2_state != SPECTRE_MITIGATED) + return sprintf(buf, "%s\n", "Vulnerable"); + + if (_unprivileged_ebpf_enabled()) + return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n"); + + switch (spectre_v2_methods) { + case SPECTRE_V2_METHOD_BPIALL: + method = "Branch predictor hardening"; + break; + + case SPECTRE_V2_METHOD_ICIALLU: + method = "I-cache invalidation"; + break; + + case SPECTRE_V2_METHOD_SMC: + case SPECTRE_V2_METHOD_HVC: + method = "Firmware call"; + break; + + case SPECTRE_V2_METHOD_LOOP8: + method = "History overwrite"; + break; + + default: + method = "Multiple mitigations"; + break; + } + + return sprintf(buf, "Mitigation: %s\n", method); +} diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index 71778bb0475b..620aa82e3bdd 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only #include <linux/export.h> +#include <linux/kprobes.h> #include <linux/sched.h> #include <linux/sched/debug.h> #include <linux/stacktrace.h> @@ -8,6 +9,8 @@ #include <asm/stacktrace.h> #include <asm/traps.h> +#include "reboot.h" + #if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) /* * Unwind the current stack frame and store the new register values in the @@ -22,38 +25,114 @@ * A simple function epilogue looks like this: * ldm sp, {fp, sp, pc} * + * When compiled with clang, pc and sp are not pushed. 
A simple function + * prologue looks like this when built with clang: + * + * stmdb {..., fp, lr} + * add fp, sp, #x + * sub sp, sp, #y + * + * A simple function epilogue looks like this when built with clang: + * + * sub sp, fp, #x + * ldm {..., fp, pc} + * + * + * Note that with framepointer enabled, even the leaf functions have the same * prologue and epilogue, therefore we can ignore the LR value in this case. */ -int notrace unwind_frame(struct stackframe *frame) + +extern unsigned long call_with_stack_end; + +static int frame_pointer_check(struct stackframe *frame) { unsigned long high, low; unsigned long fp = frame->fp; + unsigned long pc = frame->pc; + + /* + * call_with_stack() is the only place we allow SP to jump from one + * stack to another, with FP and SP pointing to different stacks, + * skipping the FP boundary check at this point. + */ + if (pc >= (unsigned long)&call_with_stack && + pc < (unsigned long)&call_with_stack_end) + return 0; /* only go to a higher address on the stack */ low = frame->sp; high = ALIGN(low, THREAD_SIZE); /* check current frame pointer is within bounds */ +#ifdef CONFIG_CC_IS_CLANG + if (fp < low + 4 || fp > high - 4) + return -EINVAL; +#else if (fp < low + 12 || fp > high - 4) return -EINVAL; +#endif + + return 0; +} + +int notrace unwind_frame(struct stackframe *frame) +{ + unsigned long fp = frame->fp; + + if (frame_pointer_check(frame)) + return -EINVAL; + + /* + * When we unwind through an exception stack, include the saved PC + * value into the stack trace. + */ + if (frame->ex_frame) { + struct pt_regs *regs = (struct pt_regs *)frame->sp; + + /* + * We check that 'regs + sizeof(struct pt_regs)' (that is, + * &regs[1]) does not exceed the bottom of the stack to avoid + * accessing data outside the task's stack. This may happen + * when frame->ex_frame is a false positive.
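
The prologue/epilogue comment above is what drives the two variants of the frame walk: with GCC's APCS-style frames, fp points at the saved pc slot, so the caller's fp/sp/pc live at fp-12/fp-8/fp-4; with clang, fp points at the saved fp, the saved lr sits one word above it, and the caller's sp is simply the current fp. A schematic sketch of the two read patterns, leaving out the READ_ONCE_NOCHECK() and the bounds checks of the real code:

struct stackframe {
	unsigned long fp, sp, lr, pc;
};

/* GCC/APCS layout: fp-12 = caller fp, fp-8 = caller sp, fp-4 = return pc */
static void unwind_step_gcc(struct stackframe *frame)
{
	unsigned long fp = frame->fp;

	frame->fp = *(unsigned long *)(fp - 12);
	frame->sp = *(unsigned long *)(fp - 8);
	frame->pc = *(unsigned long *)(fp - 4);
}

/* clang layout: fp = address of the saved fp, fp+4 = saved lr,
 * and the caller's sp is the current fp itself */
static void unwind_step_clang(struct stackframe *frame)
{
	unsigned long fp = frame->fp;

	frame->sp = fp;
	frame->fp = *(unsigned long *)(fp);
	frame->pc = *(unsigned long *)(fp + 4);
}
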
+ */ + if ((unsigned long)&regs[1] > ALIGN(frame->sp, THREAD_SIZE)) + return -EINVAL; + + frame->pc = regs->ARM_pc; + frame->ex_frame = false; + return 0; + } /* restore the registers from the stack frame */ - frame->fp = *(unsigned long *)(fp - 12); - frame->sp = *(unsigned long *)(fp - 8); - frame->pc = *(unsigned long *)(fp - 4); +#ifdef CONFIG_CC_IS_CLANG + frame->sp = frame->fp; + frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp)); + frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 4)); +#else + frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 12)); + frame->sp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 8)); + frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 4)); +#endif +#ifdef CONFIG_KRETPROBES + if (is_kretprobe_trampoline(frame->pc)) + frame->pc = kretprobe_find_ret_addr(frame->tsk, + (void *)frame->fp, &frame->kr_cur); +#endif + + if (in_entry_text(frame->pc)) + frame->ex_frame = true; return 0; } #endif void notrace walk_stackframe(struct stackframe *frame, - int (*fn)(struct stackframe *, void *), void *data) + bool (*fn)(void *, unsigned long), void *data) { while (1) { int ret; - if (fn(frame, data)) + if (!fn(data, frame->pc)) break; ret = unwind_frame(frame); if (ret < 0) @@ -63,53 +142,32 @@ void notrace walk_stackframe(struct stackframe *frame, EXPORT_SYMBOL(walk_stackframe); #ifdef CONFIG_STACKTRACE -struct stack_trace_data { - struct stack_trace *trace; - unsigned int no_sched_functions; - unsigned int skip; -}; - -static int save_trace(struct stackframe *frame, void *d) +static void start_stack_trace(struct stackframe *frame, struct task_struct *task, + unsigned long fp, unsigned long sp, + unsigned long lr, unsigned long pc) { - struct stack_trace_data *data = d; - struct stack_trace *trace = data->trace; - struct pt_regs *regs; - unsigned long addr = frame->pc; - - if (data->no_sched_functions && in_sched_functions(addr)) - return 0; - if (data->skip) { - data->skip--; - return 0; - } - - trace->entries[trace->nr_entries++] = addr; - - if (trace->nr_entries >= trace->max_entries) - return 1; - - if (!in_entry_text(frame->pc)) - return 0; - - regs = (struct pt_regs *)frame->sp; - - trace->entries[trace->nr_entries++] = regs->ARM_pc; - - return trace->nr_entries >= trace->max_entries; + frame->fp = fp; + frame->sp = sp; + frame->lr = lr; + frame->pc = pc; +#ifdef CONFIG_KRETPROBES + frame->kr_cur = NULL; + frame->tsk = task; +#endif +#ifdef CONFIG_UNWINDER_FRAME_POINTER + frame->ex_frame = in_entry_text(frame->pc); +#endif } -/* This must be noinline to so that our skip calculation works correctly */ -static noinline void __save_stack_trace(struct task_struct *tsk, - struct stack_trace *trace, unsigned int nosched) +void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, + struct task_struct *task, struct pt_regs *regs) { - struct stack_trace_data data; struct stackframe frame; - data.trace = trace; - data.skip = trace->skip; - data.no_sched_functions = nosched; - - if (tsk != current) { + if (regs) { + start_stack_trace(&frame, NULL, regs->ARM_fp, regs->ARM_sp, + regs->ARM_lr, regs->ARM_pc); + } else if (task != current) { #ifdef CONFIG_SMP /* * What guarantees do we have here that 'tsk' is not @@ -118,49 +176,22 @@ static noinline void __save_stack_trace(struct task_struct *tsk, */ return; #else - frame.fp = thread_saved_fp(tsk); - frame.sp = thread_saved_sp(tsk); - frame.lr = 0; /* recovered from the stack */ - frame.pc = thread_saved_pc(tsk); + start_stack_trace(&frame, task, thread_saved_fp(task), + thread_saved_sp(task), 0, +
thread_saved_pc(task)); #endif } else { - /* We don't want this function nor the caller */ - data.skip += 2; - frame.fp = (unsigned long)__builtin_frame_address(0); - frame.sp = current_stack_pointer; - frame.lr = (unsigned long)__builtin_return_address(0); - frame.pc = (unsigned long)__save_stack_trace; +here: + start_stack_trace(&frame, task, + (unsigned long)__builtin_frame_address(0), + current_stack_pointer, + (unsigned long)__builtin_return_address(0), + (unsigned long)&&here); + /* skip this function */ + if (unwind_frame(&frame)) + return; } - walk_stackframe(&frame, save_trace, &data); -} - -void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) -{ - struct stack_trace_data data; - struct stackframe frame; - - data.trace = trace; - data.skip = trace->skip; - data.no_sched_functions = 0; - - frame.fp = regs->ARM_fp; - frame.sp = regs->ARM_sp; - frame.lr = regs->ARM_lr; - frame.pc = regs->ARM_pc; - - walk_stackframe(&frame, save_trace, &data); -} - -void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) -{ - __save_stack_trace(tsk, trace, 1); -} -EXPORT_SYMBOL(save_stack_trace_tsk); - -void save_stack_trace(struct stack_trace *trace) -{ - __save_stack_trace(current, trace, 0); + walk_stackframe(&frame, consume_entry, cookie); } -EXPORT_SYMBOL_GPL(save_stack_trace); #endif diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c index d08099269e35..c3ec3861dd07 100644 --- a/arch/arm/kernel/suspend.c +++ b/arch/arm/kernel/suspend.c @@ -1,14 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 +#include <linux/ftrace.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/mm_types.h> +#include <linux/pgtable.h> #include <asm/bugs.h> #include <asm/cacheflush.h> #include <asm/idmap.h> -#include <asm/pgalloc.h> -#include <asm/pgtable.h> -#include <asm/memory.h> +#include <asm/page.h> #include <asm/smp_plat.h> #include <asm/suspend.h> #include <asm/tlbflush.h> @@ -27,12 +27,22 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) return -EINVAL; /* + * Function graph tracer state gets inconsistent when the kernel + * calls functions that never return (aka suspend finishers) hence + * disable graph tracing during their execution. + */ + pause_graph_tracing(); + + /* * Provide a temporary page table with an identity mapping for * the MMU-enable code, required for resuming. On successful * resume (indicated by a zero return code), we need to switch * back to the correct page tables.
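
The finisher passed to cpu_suspend() is expected not to return on success, which is exactly why graph tracing is paused around __cpu_suspend() above. A hedged, user-space sketch of the calling contract, with a stubbed cpu_suspend() and a hypothetical finisher:

#include <stdio.h>

/* stub with the kernel contract: the finisher returns only on failure,
 * and a zero return from cpu_suspend() means we suspended and resumed */
static int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	return fn(arg) ? -1 : 0;
}

/* hypothetical finisher: would normally enter a low-power state and
 * never return; returning nonzero reports that the attempt failed */
static int my_finisher(unsigned long arg)
{
	printf("entering low power, arg=%lu\n", arg);
	return 1;	/* pretend the hardware refused to power down */
}

int main(void)
{
	if (cpu_suspend(0, my_finisher))
		printf("suspend aborted, execution continues normally\n");
	return 0;
}
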
*/ ret = __cpu_suspend(arg, fn, __mpidr); + + unpause_graph_tracing(); + if (ret == 0) { cpu_switch_mm(mm->pgd, mm); local_flush_bp_all(); @@ -46,7 +56,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) { u32 __mpidr = cpu_logical_map(smp_processor_id()); - return __cpu_suspend(arg, fn, __mpidr); + int ret; + + pause_graph_tracing(); + ret = __cpu_suspend(arg, fn, __mpidr); + unpause_graph_tracing(); + + return ret; } #define idmap_pgd NULL #endif diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c index e640871328c1..fdce83c95acb 100644 --- a/arch/arm/kernel/swp_emulate.c +++ b/arch/arm/kernel/swp_emulate.c @@ -34,6 +34,7 @@ */ #define __user_swpX_asm(data, addr, res, temp, B) \ __asm__ __volatile__( \ + ".arch armv7-a\n" \ "0: ldrex"B" %2, [%3]\n" \ "1: strex"B" %0, %1, [%3]\n" \ " cmp %0, #0\n" \ @@ -97,12 +98,12 @@ static void set_segfault(struct pt_regs *regs, unsigned long addr) { int si_code; - down_read(&current->mm->mmap_sem); + mmap_read_lock(current->mm); if (find_vma(current->mm, addr) == NULL) si_code = SEGV_MAPERR; else si_code = SEGV_ACCERR; - up_read(&current->mm->mmap_sem); + mmap_read_unlock(current->mm); pr_debug("SWP{B} emulation: access caused memory abort!\n"); arm_notify_die("Illegal memory access", regs, @@ -195,7 +196,7 @@ static int swp_handler(struct pt_regs *regs, unsigned int instr) destreg, EXTRACT_REG_NUM(instr, RT2_OFFSET), data); /* Check access in reasonable access range for both SWP and SWPB */ - if (!access_ok((address & ~3), 4)) { + if (!access_ok((void __user *)(address & ~3), 4)) { pr_debug("SWP{B} emulation: access to %p not allowed!\n", (void *)address); res = -EFAULT; diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c index a5f183cfecb1..0141e9bb02e8 100644 --- a/arch/arm/kernel/sys_arm.c +++ b/arch/arm/kernel/sys_arm.c @@ -24,6 +24,7 @@ #include <linux/ipc.h> #include <linux/uaccess.h> #include <linux/slab.h> +#include <asm/syscalls.h> /* * Since loff_t is a 64 bit type we avoid a lot of ABI hassle diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c index 17bd32b22371..d00f4040a9f5 100644 --- a/arch/arm/kernel/sys_oabi-compat.c +++ b/arch/arm/kernel/sys_oabi-compat.c @@ -10,6 +10,8 @@ * Copyright: MontaVista Software, Inc. */ +#include <asm/syscalls.h> + /* * The legacy ABI and the new ARM EABI have different rules making some * syscalls incompatible especially with structure arguments.
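
Much of that structure-argument incompatibility comes down to alignment: the legacy ABI aligns 64-bit members to 4 bytes while EABI aligns them to 8, so the same struct flock64 has different layouts under the two ABIs. A runnable sketch of the effect, with simplified stand-in types (exact numbers depend on the host ABI):

#include <stdio.h>
#include <stddef.h>

typedef long long my_loff_t;	/* stands in for loff_t */

struct eabi_flock64 {		/* naturally aligned (EABI-style) layout */
	short		l_type;
	short		l_whence;
	my_loff_t	l_start;
	my_loff_t	l_len;
	int		l_pid;
};

struct oabi_flock64 {		/* legacy layout, as declared in the code above */
	short		l_type;
	short		l_whence;
	my_loff_t	l_start;
	my_loff_t	l_len;
	int		l_pid;
} __attribute__((packed, aligned(4)));

int main(void)
{
	printf("eabi: l_start at %zu, size %zu\n",
	       offsetof(struct eabi_flock64, l_start), sizeof(struct eabi_flock64));
	printf("oabi: l_start at %zu, size %zu\n",
	       offsetof(struct oabi_flock64, l_start), sizeof(struct oabi_flock64));
	return 0;
}
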
@@ -73,6 +75,7 @@ #include <linux/syscalls.h> #include <linux/errno.h> #include <linux/fs.h> +#include <linux/filelock.h> #include <linux/cred.h> #include <linux/fcntl.h> #include <linux/eventpoll.h> @@ -80,9 +83,12 @@ #include <linux/socket.h> #include <linux/net.h> #include <linux/ipc.h> +#include <linux/ipc_namespace.h> #include <linux/uaccess.h> #include <linux/slab.h> +#include <asm/syscall.h> + struct oldabi_stat64 { unsigned long long st_dev; unsigned int __pad1; @@ -191,117 +197,133 @@ struct oabi_flock64 { pid_t l_pid; } __attribute__ ((packed,aligned(4))); -static long do_locks(unsigned int fd, unsigned int cmd, - unsigned long arg) +static int get_oabi_flock(struct flock64 *kernel, struct oabi_flock64 __user *arg) { - struct flock64 kernel; struct oabi_flock64 user; - mm_segment_t fs; - long ret; if (copy_from_user(&user, (struct oabi_flock64 __user *)arg, sizeof(user))) return -EFAULT; - kernel.l_type = user.l_type; - kernel.l_whence = user.l_whence; - kernel.l_start = user.l_start; - kernel.l_len = user.l_len; - kernel.l_pid = user.l_pid; - - fs = get_fs(); - set_fs(KERNEL_DS); - ret = sys_fcntl64(fd, cmd, (unsigned long)&kernel); - set_fs(fs); - - if (!ret && (cmd == F_GETLK64 || cmd == F_OFD_GETLK)) { - user.l_type = kernel.l_type; - user.l_whence = kernel.l_whence; - user.l_start = kernel.l_start; - user.l_len = kernel.l_len; - user.l_pid = kernel.l_pid; - if (copy_to_user((struct oabi_flock64 __user *)arg, - &user, sizeof(user))) - ret = -EFAULT; - } - return ret; + + kernel->l_type = user.l_type; + kernel->l_whence = user.l_whence; + kernel->l_start = user.l_start; + kernel->l_len = user.l_len; + kernel->l_pid = user.l_pid; + + return 0; +} + +static int put_oabi_flock(struct flock64 *kernel, struct oabi_flock64 __user *arg) +{ + struct oabi_flock64 user; + + user.l_type = kernel->l_type; + user.l_whence = kernel->l_whence; + user.l_start = kernel->l_start; + user.l_len = kernel->l_len; + user.l_pid = kernel->l_pid; + + if (copy_to_user((struct oabi_flock64 __user *)arg, + &user, sizeof(user))) + return -EFAULT; + + return 0; } asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg) { + void __user *argp = (void __user *)arg; + struct fd f = fdget_raw(fd); + struct flock64 flock; + long err = -EBADF; + + if (!f.file) + goto out; + switch (cmd) { - case F_OFD_GETLK: - case F_OFD_SETLK: - case F_OFD_SETLKW: case F_GETLK64: + case F_OFD_GETLK: + err = security_file_fcntl(f.file, cmd, arg); + if (err) + break; + err = get_oabi_flock(&flock, argp); + if (err) + break; + err = fcntl_getlk64(f.file, cmd, &flock); + if (!err) + err = put_oabi_flock(&flock, argp); + break; case F_SETLK64: case F_SETLKW64: - return do_locks(fd, cmd, arg); - + case F_OFD_SETLK: + case F_OFD_SETLKW: + err = security_file_fcntl(f.file, cmd, arg); + if (err) + break; + err = get_oabi_flock(&flock, argp); + if (err) + break; + err = fcntl_setlk64(fd, f.file, cmd, &flock); + break; default: - return sys_fcntl64(fd, cmd, arg); + err = sys_fcntl64(fd, cmd, arg); + break; } + fdput(f); +out: + return err; } struct oabi_epoll_event { - __u32 events; + __poll_t events; __u64 data; } __attribute__ ((packed,aligned(4))); +#ifdef CONFIG_EPOLL asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd, struct oabi_epoll_event __user *event) { struct oabi_epoll_event user; struct epoll_event kernel; - mm_segment_t fs; - long ret; - if (op == EPOLL_CTL_DEL) - return sys_epoll_ctl(epfd, op, fd, NULL); - if (copy_from_user(&user, event, sizeof(user))) + if (ep_op_has_event(op) && + 
copy_from_user(&user, event, sizeof(user))) return -EFAULT; + kernel.events = user.events; kernel.data = user.data; - fs = get_fs(); - set_fs(KERNEL_DS); - ret = sys_epoll_ctl(epfd, op, fd, &kernel); - set_fs(fs); - return ret; + + return do_epoll_ctl(epfd, op, fd, &kernel, false); } +#else +asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd, + struct oabi_epoll_event __user *event) +{ + return -EINVAL; +} +#endif -asmlinkage long sys_oabi_epoll_wait(int epfd, - struct oabi_epoll_event __user *events, - int maxevents, int timeout) +struct epoll_event __user * +epoll_put_uevent(__poll_t revents, __u64 data, + struct epoll_event __user *uevent) { - struct epoll_event *kbuf; - struct oabi_epoll_event e; - mm_segment_t fs; - long ret, err, i; - - if (maxevents <= 0 || - maxevents > (INT_MAX/sizeof(*kbuf)) || - maxevents > (INT_MAX/sizeof(*events))) - return -EINVAL; - if (!access_ok(events, sizeof(*events) * maxevents)) - return -EFAULT; - kbuf = kmalloc_array(maxevents, sizeof(*kbuf), GFP_KERNEL); - if (!kbuf) - return -ENOMEM; - fs = get_fs(); - set_fs(KERNEL_DS); - ret = sys_epoll_wait(epfd, kbuf, maxevents, timeout); - set_fs(fs); - err = 0; - for (i = 0; i < ret; i++) { - e.events = kbuf[i].events; - e.data = kbuf[i].data; - err = __copy_to_user(events, &e, sizeof(e)); - if (err) - break; - events++; + if (in_oabi_syscall()) { + struct oabi_epoll_event __user *oevent = (void __user *)uevent; + + if (__put_user(revents, &oevent->events) || + __put_user(data, &oevent->data)) + return NULL; + + return (void __user *)(oevent+1); } - kfree(kbuf); - return err ? -EFAULT : ret; + + if (__put_user(revents, &uevent->events) || + __put_user(data, &uevent->data)) + return NULL; + + return uevent+1; } struct oabi_sembuf { @@ -311,46 +333,52 @@ struct oabi_sembuf { unsigned short __pad; }; +#define sc_semopm sem_ctls[2] + +#ifdef CONFIG_SYSVIPC asmlinkage long sys_oabi_semtimedop(int semid, struct oabi_sembuf __user *tsops, unsigned nsops, const struct old_timespec32 __user *timeout) { + struct ipc_namespace *ns; struct sembuf *sops; - struct old_timespec32 local_timeout; long err; int i; + ns = current->nsproxy->ipc_ns; + if (nsops > ns->sc_semopm) + return -E2BIG; if (nsops < 1 || nsops > SEMOPM) return -EINVAL; - if (!access_ok(tsops, sizeof(*tsops) * nsops)) - return -EFAULT; - sops = kmalloc_array(nsops, sizeof(*sops), GFP_KERNEL); + sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL); if (!sops) return -ENOMEM; err = 0; for (i = 0; i < nsops; i++) { struct oabi_sembuf osb; - err |= __copy_from_user(&osb, tsops, sizeof(osb)); + err |= copy_from_user(&osb, tsops, sizeof(osb)); sops[i].sem_num = osb.sem_num; sops[i].sem_op = osb.sem_op; sops[i].sem_flg = osb.sem_flg; tsops++; } - if (timeout) { - /* copy this as well before changing domain protection */ - err |= copy_from_user(&local_timeout, timeout, sizeof(*timeout)); - timeout = &local_timeout; - } if (err) { err = -EFAULT; - } else { - mm_segment_t fs = get_fs(); - set_fs(KERNEL_DS); - err = sys_semtimedop_time32(semid, sops, nsops, timeout); - set_fs(fs); + goto out; } - kfree(sops); + + if (timeout) { + struct timespec64 ts; + err = get_old_timespec32(&ts, timeout); + if (err) + goto out; + err = __do_semtimedop(semid, sops, nsops, &ts, ns); + goto out; + } + err = __do_semtimedop(semid, sops, nsops, NULL, ns); +out: + kvfree(sops); return err; } @@ -377,6 +405,27 @@ asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third, return sys_ipc(call, first, second, third, ptr, fifth); } } +#else +asmlinkage long 
sys_oabi_semtimedop(int semid, + struct oabi_sembuf __user *tsops, + unsigned nsops, + const struct old_timespec32 __user *timeout) +{ + return -ENOSYS; +} + +asmlinkage long sys_oabi_semop(int semid, struct oabi_sembuf __user *tsops, + unsigned nsops) +{ + return -ENOSYS; +} + +asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third, + void __user *ptr, long fifth) +{ + return -ENOSYS; +} +#endif asmlinkage long sys_oabi_bind(int fd, struct sockaddr __user *addr, int addrlen) { diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c index d3a85f01b328..f59927bcfbce 100644 --- a/arch/arm/kernel/tcm.c +++ b/arch/arm/kernel/tcm.c @@ -15,7 +15,7 @@ #include <linux/string.h> /* memcpy */ #include <asm/cputype.h> #include <asm/mach/map.h> -#include <asm/memory.h> +#include <asm/page.h> #include <asm/system_info.h> #include <asm/traps.h> #include <asm/tcm.h> diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c index dddc7ebf4db4..b3836c94dc74 100644 --- a/arch/arm/kernel/time.c +++ b/arch/arm/kernel/time.c @@ -8,7 +8,6 @@ * This file contains the ARM-specific time handling details: * reading the RTC at bootup, etc... */ -#include <linux/clk-provider.h> #include <linux/clockchips.h> #include <linux/clocksource.h> #include <linux/errno.h> @@ -17,6 +16,7 @@ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/kernel.h> +#include <linux/of_clk.h> #include <linux/profile.h> #include <linux/sched.h> #include <linux/sched_clock.h> @@ -60,20 +60,6 @@ unsigned long profile_pc(struct pt_regs *regs) EXPORT_SYMBOL(profile_pc); #endif -#ifndef CONFIG_GENERIC_CLOCKEVENTS -/* - * Kernel system timer support. - */ -void timer_tick(void) -{ - profile_tick(CPU_PROFILING); - xtime_update(1); -#ifndef CONFIG_SMP - update_process_times(user_mode(get_irq_regs())); -#endif -} -#endif - static void dummy_clock_access(struct timespec64 *ts) { ts->tv_sec = 0; diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index b5adaf744630..ef0058de432b 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -178,15 +178,6 @@ static inline void update_cpu_capacity(unsigned int cpuid) {} #endif /* - * The current assumption is that we can power gate each core independently. - * This will be superseded by DT binding once available. 
- */ -const struct cpumask *cpu_corepower_mask(int cpu) -{ - return &cpu_topology[cpu].thread_sibling; -} - -/* * store_cpu_topology is called at boot when only one cpu is running * and with the mutex cpu_hotplug.lock locked, when several cpus have booted, * which prevents simultaneous write access to cpu_topology array @@ -241,20 +232,6 @@ topology_populated: update_siblings_masks(cpuid); } -static inline int cpu_corepower_flags(void) -{ - return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN; -} - -static struct sched_domain_topology_level arm_topology[] = { -#ifdef CONFIG_SCHED_MC - { cpu_corepower_mask, cpu_corepower_flags, SD_INIT_NAME(GMC) }, - { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, -#endif - { cpu_cpu_mask, SD_INIT_NAME(DIE) }, - { NULL, }, -}; - /* * init_cpu_topology is called at boot when only one cpu is running * which prevent simultaneous write access to cpu_topology array @@ -265,7 +242,4 @@ void __init init_cpu_topology(void) smp_wmb(); parse_dt_topology(); - - /* Set scheduler topology descriptor */ - set_sched_topology(arm_topology); } diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index c053abd1fb53..72c82a4d63ac 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -30,11 +30,13 @@ #include <linux/atomic.h> #include <asm/cacheflush.h> #include <asm/exception.h> +#include <asm/spectre.h> #include <asm/unistd.h> #include <asm/traps.h> #include <asm/ptrace.h> #include <asm/unwind.h> #include <asm/tls.h> +#include <asm/stacktrace.h> #include <asm/system_misc.h> #include <asm/opcodes.h> @@ -60,21 +62,39 @@ static int __init user_debug_setup(char *str) __setup("user_debug=", user_debug_setup); #endif -static void dump_mem(const char *, const char *, unsigned long, unsigned long); +void dump_backtrace_entry(unsigned long where, unsigned long from, + unsigned long frame, const char *loglvl) +{ + unsigned long end = frame + 4 + sizeof(struct pt_regs); + + if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER) && + IS_ENABLED(CONFIG_CC_IS_GCC) && + end > ALIGN(frame, THREAD_SIZE)) { + /* + * If we are walking past the end of the stack, it may be due + * to the fact that we are on an IRQ or overflow stack. In this + * case, we can load the address of the other stack from the + * frame record. 
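
Both the unwinder and the backtrace code bound their walks with expressions like ALIGN(sp, THREAD_SIZE): kernel stacks are THREAD_SIZE-aligned, so rounding any in-stack address up to the next THREAD_SIZE boundary yields the top of the stack that address lives on. A runnable illustration, with the THREAD_SIZE value assumed for the example:

#include <stdio.h>

#define THREAD_SIZE	0x2000UL			/* assumed 8 KiB stacks */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* round up to a multiple of a */

int main(void)
{
	unsigned long sp = 0xc0a01234UL;	/* arbitrary address within a stack */

	printf("stack top for sp=%#lx is %#lx\n", sp, ALIGN(sp, THREAD_SIZE));
	/* prints 0xc0a02000, the next THREAD_SIZE boundary above sp */
	return 0;
}
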
+ */ + frame = ((unsigned long *)frame)[-2] - 4; + end = frame + 4 + sizeof(struct pt_regs); + } -void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame) -{ -#ifdef CONFIG_KALLSYMS - printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from); +#ifndef CONFIG_KALLSYMS + printk("%sFunction entered at [<%08lx>] from [<%08lx>]\n", + loglvl, where, from); +#elif defined CONFIG_BACKTRACE_VERBOSE + printk("%s[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", + loglvl, where, (void *)where, from, (void *)from); #else - printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from); + printk("%s %ps from %pS\n", loglvl, (void *)where, (void *)from); #endif - if (in_entry_text(from)) - dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs)); + if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE)) + dump_mem(loglvl, "Exception stack", frame + 4, end); } -void dump_backtrace_stm(u32 *stack, u32 instruction) +void dump_backtrace_stm(u32 *stack, u32 instruction, const char *loglvl) { char str[80], *p; unsigned int x; @@ -86,12 +106,12 @@ void dump_backtrace_stm(u32 *stack, u32 instruction) if (++x == 6) { x = 0; p = str; - printk("%s\n", str); + printk("%s%s\n", loglvl, str); } } } if (p != str) - printk("%s\n", str); + printk("%s%s\n", loglvl, str); } #ifndef CONFIG_ARM_UNWIND @@ -103,7 +123,8 @@ void dump_backtrace_stm(u32 *stack, u32 instruction) static int verify_stack(unsigned long sp) { if (sp < PAGE_OFFSET || - (sp > (unsigned long)high_memory && high_memory != NULL)) + (!IS_ENABLED(CONFIG_VMAP_STACK) && + sp > (unsigned long)high_memory && high_memory != NULL)) return -EFAULT; return 0; @@ -113,21 +134,12 @@ static int verify_stack(unsigned long sp) /* * Dump out the contents of some memory nicely... */ -static void dump_mem(const char *lvl, const char *str, unsigned long bottom, - unsigned long top) +void dump_mem(const char *lvl, const char *str, unsigned long bottom, + unsigned long top) { unsigned long first; - mm_segment_t fs; int i; - /* - * We need to switch to kernel mode so that we can use __get_user - * to safely read from kernel space. Note that we now dump the - * code first, just in case the backtrace kills us. 
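
dump_mem() prints memory 32 bytes per row, starting at the enclosing 32-byte boundary, and now reads each word with get_kernel_nofault() instead of switching address spaces with set_fs(). A user-space sketch of the same row layout; the plain dereference here stands in for the fault-safe read, which in the real code prints "????????" on failure:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
{
	unsigned long first;

	printf("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);

	/* one row per 32-byte-aligned chunk, eight 32-bit words per row */
	for (first = bottom & ~31UL; first < top; first += 32) {
		char row[sizeof(" 12345678") * 8 + 1];
		unsigned long p;
		int i;

		memset(row, ' ', sizeof(row));
		row[sizeof(row) - 1] = '\0';

		for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
			if (p >= bottom && p < top) {
				uint32_t val = *(uint32_t *)p;

				sprintf(row + i * 9, " %08x", val);
			}
		}
		printf("%04lx:%s\n", first & 0xffff, row);
	}
}

int main(void)
{
	static uint32_t buf[16] = { 0x11111111, 0x22222222, 0x33333333 };

	dump_mem("Stack: ", (unsigned long)buf, (unsigned long)(buf + 16));
	return 0;
}
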
- */ - fs = get_fs(); - set_fs(KERNEL_DS); - printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top); for (first = bottom & ~31; first < top; first += 32) { @@ -140,7 +152,7 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom, for (p = first, i = 0; i < 8 && p < top; i++, p += 4) { if (p >= bottom && p < top) { unsigned long val; - if (__get_user(val, (unsigned long *)p) == 0) + if (!get_kernel_nofault(val, (unsigned long *)p)) sprintf(str + i * 9, " %08lx", val); else sprintf(str + i * 9, " ????????"); @@ -148,11 +160,9 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom, } printk("%s%04lx:%s\n", lvl, first & 0xffff, str); } - - set_fs(fs); } -static void __dump_instr(const char *lvl, struct pt_regs *regs) +static void dump_instr(const char *lvl, struct pt_regs *regs) { unsigned long addr = instruction_pointer(regs); const int thumb = thumb_mode(regs); @@ -168,10 +178,23 @@ static void __dump_instr(const char *lvl, struct pt_regs *regs) for (i = -4; i < 1 + !!thumb; i++) { unsigned int val, bad; - if (thumb) - bad = get_user(val, &((u16 *)addr)[i]); - else - bad = get_user(val, &((u32 *)addr)[i]); + if (thumb) { + u16 tmp; + + if (user_mode(regs)) + bad = get_user(tmp, &((u16 __user *)addr)[i]); + else + bad = get_kernel_nofault(tmp, &((u16 *)addr)[i]); + + val = __mem_to_opcode_thumb16(tmp); + } else { + if (user_mode(regs)) + bad = get_user(val, &((u32 __user *)addr)[i]); + else + bad = get_kernel_nofault(val, &((u32 *)addr)[i]); + + val = __mem_to_opcode_arm(val); + } if (!bad) p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ", @@ -184,32 +207,20 @@ static void __dump_instr(const char *lvl, struct pt_regs *regs) printk("%sCode: %s\n", lvl, str); } -static void dump_instr(const char *lvl, struct pt_regs *regs) -{ - mm_segment_t fs; - - if (!user_mode(regs)) { - fs = get_fs(); - set_fs(KERNEL_DS); - __dump_instr(lvl, regs); - set_fs(fs); - } else { - __dump_instr(lvl, regs); - } -} - #ifdef CONFIG_ARM_UNWIND -static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) +void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, + const char *loglvl) { - unwind_backtrace(regs, tsk); + unwind_backtrace(regs, tsk, loglvl); } #else -static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) +void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, + const char *loglvl) { unsigned int fp, mode; int ok = 1; - printk("Backtrace: "); + printk("%sCall trace: ", loglvl); if (!tsk) tsk = current; @@ -236,18 +247,20 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) pr_cont("\n"); if (ok) - c_backtrace(fp, mode); + c_backtrace(fp, mode, loglvl); } #endif -void show_stack(struct task_struct *tsk, unsigned long *sp) +void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl) { - dump_backtrace(NULL, tsk); + dump_backtrace(NULL, tsk, loglvl); barrier(); } #ifdef CONFIG_PREEMPT #define S_PREEMPT " PREEMPT" +#elif defined(CONFIG_PREEMPT_RT) +#define S_PREEMPT " PREEMPT_RT" #else #define S_PREEMPT "" #endif @@ -278,13 +291,15 @@ static int __die(const char *str, int err, struct pt_regs *regs) print_modules(); __show_regs(regs); + __show_regs_alloc_free(regs); pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n", TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk)); if (!user_mode(regs) || in_interrupt()) { dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp, - THREAD_SIZE + (unsigned long)task_stack_page(tsk)); - dump_backtrace(regs, tsk); + 
ALIGN(regs->ARM_sp - THREAD_SIZE, THREAD_ALIGN) + + THREAD_SIZE); + dump_backtrace(regs, tsk, KERN_EMERG); dump_instr(KERN_EMERG, regs); } @@ -338,7 +353,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr) if (panic_on_oops) panic("Fatal exception"); if (signr) - do_exit(signr); + make_task_dead(signr); } /* @@ -387,7 +402,7 @@ int is_valid_bugaddr(unsigned long pc) u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE); #endif - if (probe_kernel_address((unsigned *)pc, bkpt)) + if (get_kernel_nofault(bkpt, (void *)pc)) return 0; return bkpt == insn; @@ -475,7 +490,7 @@ asmlinkage void do_undefinstr(struct pt_regs *regs) die_sig: #ifdef CONFIG_DEBUG_USER if (user_debug & UDBG_UNDEFINED) { - pr_info("%s (%d): undefined instruction: pc=%p\n", + pr_info("%s (%d): undefined instruction: pc=%px\n", current->comm, task_pid_nr(current), pc); __show_regs(regs); dump_instr(KERN_INFO, regs); @@ -562,7 +577,7 @@ __do_cache_op(unsigned long start, unsigned long end) if (fatal_signal_pending(current)) return 0; - ret = flush_cache_user_range(start, start + chunk); + ret = flush_icache_user_range(start, start + chunk); if (ret) return ret; @@ -579,7 +594,7 @@ do_cache_op(unsigned long start, unsigned long end, int flags) if (end < start || flags) return -EINVAL; - if (!access_ok(start, end - start)) + if (!access_ok((void __user *)start, end - start)) return -EFAULT; return __do_cache_op(start, end); @@ -659,10 +674,10 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) if (user_debug & UDBG_SYSCALL) { pr_err("[%d] %s: arm syscall %d\n", task_pid_nr(current), current->comm, no); - dump_instr("", regs); + dump_instr(KERN_ERR, regs); if (user_mode(regs)) { __show_regs(regs); - c_backtrace(frame_pointer(regs), processor_mode(regs)); + c_backtrace(frame_pointer(regs), processor_mode(regs), KERN_ERR); } } #endif @@ -741,6 +756,7 @@ void __readwrite_bug(const char *fn) } EXPORT_SYMBOL(__readwrite_bug); +#ifdef CONFIG_MMU void __pte_error(const char *file, int line, pte_t pte) { pr_err("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte)); @@ -755,6 +771,7 @@ void __pgd_error(const char *file, int line, pgd_t pgd) { pr_err("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd)); } +#endif asmlinkage void __div0(void) { @@ -771,11 +788,6 @@ void abort(void) panic("Oops failed to kill thread"); } -void __init trap_init(void) -{ - return; -} - #ifdef CONFIG_KUSER_HELPERS static void __init kuser_init(void *vectors) { @@ -797,10 +809,59 @@ static inline void __init kuser_init(void *vectors) } #endif +#ifndef CONFIG_CPU_V7M +static void copy_from_lma(void *vma, void *lma_start, void *lma_end) +{ + memcpy(vma, lma_start, lma_end - lma_start); +} + +static void flush_vectors(void *vma, size_t offset, size_t size) +{ + unsigned long start = (unsigned long)vma + offset; + unsigned long end = start + size; + + flush_icache_range(start, end); +} + +#ifdef CONFIG_HARDEN_BRANCH_HISTORY +int spectre_bhb_update_vectors(unsigned int method) +{ + extern char __vectors_bhb_bpiall_start[], __vectors_bhb_bpiall_end[]; + extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[]; + void *vec_start, *vec_end; + + if (system_state >= SYSTEM_FREEING_INITMEM) { + pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n", + smp_processor_id()); + return SPECTRE_VULNERABLE; + } + + switch (method) { + case SPECTRE_V2_METHOD_LOOP8: + vec_start = __vectors_bhb_loop8_start; + vec_end = __vectors_bhb_loop8_end; + break; + + case SPECTRE_V2_METHOD_BPIALL: + vec_start = 
__vectors_bhb_bpiall_start; + vec_end = __vectors_bhb_bpiall_end; + break; + + default: + pr_err("CPU%u: unknown Spectre BHB state %d\n", + smp_processor_id(), method); + return SPECTRE_VULNERABLE; + } + + copy_from_lma(vectors_page, vec_start, vec_end); + flush_vectors(vectors_page, 0, vec_end - vec_start); + + return SPECTRE_MITIGATED; +} +#endif + void __init early_trap_init(void *vectors_base) { -#ifndef CONFIG_CPU_V7M - unsigned long vectors = (unsigned long)vectors_base; extern char __stubs_start[], __stubs_end[]; extern char __vectors_start[], __vectors_end[]; unsigned i; @@ -821,17 +882,87 @@ void __init early_trap_init(void *vectors_base) * into the vector page, mapped at 0xffff0000, and ensure these * are visible to the instruction stream. */ - memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start); - memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start); + copy_from_lma(vectors_base, __vectors_start, __vectors_end); + copy_from_lma(vectors_base + 0x1000, __stubs_start, __stubs_end); kuser_init(vectors_base); - flush_icache_range(vectors, vectors + PAGE_SIZE * 2); + flush_vectors(vectors_base, 0, PAGE_SIZE * 2); +} #else /* ifndef CONFIG_CPU_V7M */ +void __init early_trap_init(void *vectors_base) +{ /* * on V7-M there is no need to copy the vector table to a dedicated * memory area. The address is configurable and so a table in the kernel * image can be used. */ +} #endif + +#ifdef CONFIG_VMAP_STACK + +DECLARE_PER_CPU(u8 *, irq_stack_ptr); + +asmlinkage DEFINE_PER_CPU(u8 *, overflow_stack_ptr); + +static int __init allocate_overflow_stacks(void) +{ + u8 *stack; + int cpu; + + for_each_possible_cpu(cpu) { + stack = (u8 *)__get_free_page(GFP_KERNEL); + if (WARN_ON(!stack)) + return -ENOMEM; + per_cpu(overflow_stack_ptr, cpu) = &stack[OVERFLOW_STACK_SIZE]; + } + return 0; } +early_initcall(allocate_overflow_stacks); + +asmlinkage void handle_bad_stack(struct pt_regs *regs) +{ + unsigned long tsk_stk = (unsigned long)current->stack; +#ifdef CONFIG_IRQSTACKS + unsigned long irq_stk = (unsigned long)raw_cpu_read(irq_stack_ptr); +#endif + unsigned long ovf_stk = (unsigned long)raw_cpu_read(overflow_stack_ptr); + + console_verbose(); + pr_emerg("Insufficient stack space to handle exception!"); + + pr_emerg("Task stack: [0x%08lx..0x%08lx]\n", + tsk_stk, tsk_stk + THREAD_SIZE); +#ifdef CONFIG_IRQSTACKS + pr_emerg("IRQ stack: [0x%08lx..0x%08lx]\n", + irq_stk - THREAD_SIZE, irq_stk); +#endif + pr_emerg("Overflow stack: [0x%08lx..0x%08lx]\n", + ovf_stk - OVERFLOW_STACK_SIZE, ovf_stk); + + die("kernel stack overflow", regs, 0); +} + +#ifndef CONFIG_ARM_LPAE +/* + * Normally, we rely on the logic in do_translation_fault() to update stale PMD + * entries covering the vmalloc space in a task's page tables when it first + * accesses the region in question. Unfortunately, this is not sufficient when + * the task stack resides in the vmalloc region, as do_translation_fault() is a + * C function that needs a stack to run. + * + * So we need to ensure that these PMD entries are up to date *before* the MM + * switch. As we already have some logic in the MM switch path that takes care + * of this, let's trigger it by bumping the counter every time the core vmalloc + * code modifies a PMD entry in the vmalloc region. Use release semantics on + * the store so that other CPUs observing the counter's new value are + * guaranteed to see the updated page table entries as well. 
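
The ordering requirement described above maps directly onto C11 atomics: the counter bump is a store with release semantics, so any reader that observes the new counter value with an acquire load is also guaranteed to see the page-table updates made before the bump. A compact sketch with illustrative names:

#include <stdatomic.h>

static atomic_int vmalloc_seq;
static int pmd_entry;	/* stands in for a PMD entry in init_mm */

/* writer: update the entry, then bump the counter with release ordering */
static void publish_update(void)
{
	pmd_entry = 1;
	atomic_fetch_add_explicit(&vmalloc_seq, 1, memory_order_release);
}

/* reader: pairs with the release above; if the acquire load sees the
 * new count, the store to pmd_entry is visible as well */
static int read_entry_if_updated(void)
{
	if (atomic_load_explicit(&vmalloc_seq, memory_order_acquire))
		return pmd_entry;
	return -1;
}
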
+ */ +void arch_sync_kernel_mappings(unsigned long start, unsigned long end) +{ + if (start < VMALLOC_END && end > VMALLOC_START) + atomic_inc_return_release(&init_mm.context.vmalloc_seq); +} +#endif +#endif diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c index 4574e6aea0a5..f60547dadc93 100644 --- a/arch/arm/kernel/unwind.c +++ b/arch/arm/kernel/unwind.c @@ -18,9 +18,6 @@ #warning Your compiler does not have EABI support. #warning ARM unwind is known to compile only with EABI compilers. #warning Change compiler or disable ARM_UNWIND option. -#elif (__GNUC__ == 4 && __GNUC_MINOR__ <= 2) && !defined(__clang__) -#warning Your compiler is too buggy; it is known to not compile ARM unwind support. -#warning Change compiler or disable ARM_UNWIND option. #endif #endif /* __CHECKER__ */ @@ -31,11 +28,14 @@ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/list.h> +#include <linux/module.h> #include <asm/stacktrace.h> #include <asm/traps.h> #include <asm/unwind.h> +#include "reboot.h" + /* Dummy functions to avoid linker complaints */ void __aeabi_unwind_cpp_pr0(void) { @@ -56,6 +56,7 @@ struct unwind_ctrl_block { unsigned long vrs[16]; /* virtual register set */ const unsigned long *insn; /* pointer to the current instructions word */ unsigned long sp_high; /* highest value of sp allowed */ + unsigned long *lr_addr; /* address of LR value on the stack */ /* * 1 : check for stack overflow for each register pop. * 0 : save overhead if there is plenty of stack remaining. @@ -236,7 +237,13 @@ static int unwind_pop_register(struct unwind_ctrl_block *ctrl, if (*vsp >= (unsigned long *)ctrl->sp_high) return -URC_FAILURE; - ctrl->vrs[reg] = *(*vsp)++; + /* Use READ_ONCE_NOCHECK here to avoid this memory access + * from being tracked by KASAN. + */ + ctrl->vrs[reg] = READ_ONCE_NOCHECK(*(*vsp)); + if (reg == 14) + ctrl->lr_addr = *vsp; + (*vsp)++; return URC_OK; } @@ -255,8 +262,9 @@ static int unwind_exec_pop_subset_r4_to_r13(struct unwind_ctrl_block *ctrl, mask >>= 1; reg++; } - if (!load_sp) + if (!load_sp) { ctrl->vrs[SP] = (unsigned long)vsp; + } return URC_OK; } @@ -300,6 +308,29 @@ static int unwind_exec_pop_subset_r0_to_r3(struct unwind_ctrl_block *ctrl, return URC_OK; } +static unsigned long unwind_decode_uleb128(struct unwind_ctrl_block *ctrl) +{ + unsigned long bytes = 0; + unsigned long insn; + unsigned long result = 0; + + /* + * unwind_get_byte() will advance `ctrl` one instruction at a time, so + * loop until we get an instruction byte where bit 7 is not set. + * + * Note: This decodes a maximum of 4 bytes to output 28 bits data where + * max is 0xfffffff: that will cover a vsp increment of 1073742336, hence + * it is sufficient for unwinding the stack. + */ + do { + insn = unwind_get_byte(ctrl); + result |= (insn & 0x7f) << (bytes * 7); + bytes++; + } while (!!(insn & 0x80) && (bytes != sizeof(result))); + + return result; +} + /* * Execute the current unwind instruction. 
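
unwind_decode_uleb128() above accumulates seven payload bits per byte, least-significant group first, and stops when bit 7 is clear or after sizeof(unsigned long) bytes (four on 32-bit ARM). The same loop in a runnable form, reading from a buffer instead of the unwind instruction stream:

#include <stdio.h>

static unsigned long decode_uleb128(const unsigned char *p)
{
	unsigned long bytes = 0;
	unsigned long insn;
	unsigned long result = 0;

	do {
		insn = p[bytes];
		result |= (insn & 0x7f) << (bytes * 7);
		bytes++;
	} while ((insn & 0x80) && bytes != sizeof(result));

	return result;
}

int main(void)
{
	const unsigned char enc[] = { 0xe8, 0x07 };	/* ULEB128 encoding of 1000 */

	printf("%lu\n", decode_uleb128(enc));		/* prints 1000 */
	return 0;
}
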
*/ @@ -312,9 +343,9 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl) if ((insn & 0xc0) == 0x00) ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4; - else if ((insn & 0xc0) == 0x40) + else if ((insn & 0xc0) == 0x40) { ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4; - else if ((insn & 0xf0) == 0x80) { + } else if ((insn & 0xf0) == 0x80) { unsigned long mask; insn = (insn << 8) | unwind_get_byte(ctrl); @@ -329,9 +360,9 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl) if (ret) goto error; } else if ((insn & 0xf0) == 0x90 && - (insn & 0x0d) != 0x0d) + (insn & 0x0d) != 0x0d) { ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f]; - else if ((insn & 0xf0) == 0xa0) { + } else if ((insn & 0xf0) == 0xa0) { ret = unwind_exec_pop_r4_to_rN(ctrl, insn); if (ret) goto error; @@ -353,7 +384,7 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl) if (ret) goto error; } else if (insn == 0xb2) { - unsigned long uleb128 = unwind_get_byte(ctrl); + unsigned long uleb128 = unwind_decode_uleb128(ctrl); ctrl->vrs[SP] += 0x204 + (uleb128 << 2); } else { @@ -374,23 +405,32 @@ error: */ int unwind_frame(struct stackframe *frame) { - unsigned long low; const struct unwind_idx *idx; struct unwind_ctrl_block ctrl; + unsigned long sp_low; /* store the highest address on the stack to avoid crossing it*/ - low = frame->sp; - ctrl.sp_high = ALIGN(low, THREAD_SIZE); + sp_low = frame->sp; + ctrl.sp_high = ALIGN(sp_low - THREAD_SIZE, THREAD_ALIGN) + + THREAD_SIZE; pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__, frame->pc, frame->lr, frame->sp); - if (!kernel_text_address(frame->pc)) - return -URC_FAILURE; - idx = unwind_find_idx(frame->pc); if (!idx) { - pr_warn("unwind: Index not found %08lx\n", frame->pc); + if (frame->pc && kernel_text_address(frame->pc)) { + if (in_module_plt(frame->pc) && frame->pc != frame->lr) { + /* + * Quoting Ard: Veneers only set PC using a + * PC+immediate LDR, and so they don't affect + * the state of the stack or the register file + */ + frame->pc = frame->lr; + return URC_OK; + } + pr_warn("unwind: Index not found %08lx\n", frame->pc); + } return -URC_FAILURE; } @@ -402,7 +442,20 @@ int unwind_frame(struct stackframe *frame) if (idx->insn == 1) /* can't unwind */ return -URC_FAILURE; - else if ((idx->insn & 0x80000000) == 0) + else if (frame->pc == prel31_to_addr(&idx->addr_offset)) { + /* + * Unwinding is tricky when we're halfway through the prologue, + * since the stack frame that the unwinder expects may not be + * fully set up yet. However, one thing we do know for sure is + * that if we are unwinding from the very first instruction of + * a function, we are still effectively in the stack frame of + * the caller, and the unwind info has no relevance yet. + */ + if (frame->pc == frame->lr) + return -URC_FAILURE; + frame->pc = frame->lr; + return URC_OK; + } else if ((idx->insn & 0x80000000) == 0) /* prel31 to the unwind table */ ctrl.insn = (unsigned long *)prel31_to_addr(&idx->insn); else if ((idx->insn & 0xff000000) == 0x80000000) @@ -429,6 +482,16 @@ int unwind_frame(struct stackframe *frame) ctrl.check_each_pop = 0; + if (prel31_to_addr(&idx->addr_offset) == (u32)&call_with_stack) { + /* + * call_with_stack() is the only place where we permit SP to + * jump from one stack to another, and since we know it is + * guaranteed to happen, set up the SP bounds accordingly. 
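
The prel31 fields consulted here (idx->addr_offset, idx->insn) hold a 31-bit signed offset relative to the address of the field itself; sign-extending bit 30 and adding the field's own address yields the absolute address. A runnable sketch of the conversion in 32-bit arithmetic, in the spirit of the kernel's prel31_to_addr() helper (the helper's body is not shown in this patch, so treat the details as illustrative):

#include <stdio.h>
#include <stdint.h>

/* bits 30..0 are a signed offset from the location of the word itself;
 * shifting up one and back down sign-extends bit 30 */
static uintptr_t prel31_to_addr(const uint32_t *ptr)
{
	int32_t offset = (int32_t)(*ptr << 1) >> 1;

	return (uintptr_t)ptr + offset;
}

int main(void)
{
	uint32_t rel = (uint32_t)-16 & 0x7fffffffu;	/* entry pointing 16 bytes back */

	printf("entry %p -> target %p\n",
	       (void *)&rel, (void *)prel31_to_addr(&rel));
	return 0;
}
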
+ */ + sp_low = frame->fp; + ctrl.sp_high = ALIGN(frame->fp, THREAD_SIZE); + } + while (ctrl.entries > 0) { int urc; if ((ctrl.sp_high - ctrl.vrs[SP]) < sizeof(ctrl.vrs)) @@ -436,7 +499,7 @@ int unwind_frame(struct stackframe *frame) urc = unwind_exec_insn(&ctrl); if (urc < 0) return urc; - if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= ctrl.sp_high) + if (ctrl.vrs[SP] < sp_low || ctrl.vrs[SP] > ctrl.sp_high) return -URC_FAILURE; } @@ -444,21 +507,25 @@ int unwind_frame(struct stackframe *frame) ctrl.vrs[PC] = ctrl.vrs[LR]; /* check for infinite loop */ - if (frame->pc == ctrl.vrs[PC]) + if (frame->pc == ctrl.vrs[PC] && frame->sp == ctrl.vrs[SP]) return -URC_FAILURE; frame->fp = ctrl.vrs[FP]; frame->sp = ctrl.vrs[SP]; frame->lr = ctrl.vrs[LR]; frame->pc = ctrl.vrs[PC]; + frame->lr_addr = ctrl.lr_addr; return URC_OK; } -void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk) +void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk, + const char *loglvl) { struct stackframe frame; + printk("%sCall trace: ", loglvl); + pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); if (!tsk) @@ -473,7 +540,12 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk) frame.fp = (unsigned long)__builtin_frame_address(0); frame.sp = current_stack_pointer; frame.lr = (unsigned long)__builtin_return_address(0); - frame.pc = (unsigned long)unwind_backtrace; + /* We are saving the stack and execution state at this + * point, so we should ensure that frame.pc is within + * this block of code. + */ +here: + frame.pc = (unsigned long)&&here; } else { /* task blocked in __switch_to */ frame.fp = thread_saved_fp(tsk); @@ -493,7 +565,7 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk) urc = unwind_frame(&frame); if (urc < 0) break; - dump_backtrace_entry(where, frame.pc, frame.sp - 4); + dump_backtrace_entry(where, frame.pc, frame.sp - 4, loglvl); } } diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c index c89ac1b9d28b..d499ad461b00 100644 --- a/arch/arm/kernel/vdso.c +++ b/arch/arm/kernel/vdso.c @@ -21,7 +21,6 @@ #include <asm/cacheflush.h> #include <asm/page.h> #include <asm/vdso.h> -#include <asm/vdso_datapage.h> #include <clocksource/arm_arch_timer.h> #include <vdso/helpers.h> #include <vdso/vsyscall.h> @@ -35,9 +34,6 @@ extern char vdso_start[], vdso_end[]; /* Total number of pages needed for the data and text portions of the VDSO. */ unsigned int vdso_total_pages __ro_after_init; -/* - * The VDSO data page. 
- */ static union vdso_data_store vdso_data_store __page_aligned_data; struct vdso_data *vdso_data = vdso_data_store.data; @@ -50,15 +46,6 @@ static const struct vm_special_mapping vdso_data_mapping = { static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) { - unsigned long new_size = new_vma->vm_end - new_vma->vm_start; - unsigned long vdso_size; - - /* without VVAR page */ - vdso_size = (vdso_total_pages - 1) << PAGE_SHIFT; - - if (vdso_size != new_size) - return -EINVAL; - current->mm->context.vdso = new_vma->vm_start; return 0; @@ -95,6 +82,8 @@ static bool __init cntvct_functional(void) */ np = of_find_compatible_node(NULL, NULL, "arm,armv7-timer"); if (!np) + np = of_find_compatible_node(NULL, NULL, "arm,armv8-timer"); + if (!np) goto out_put; if (of_property_read_bool(np, "arm,cpu-registers-not-fw-configured")) @@ -142,7 +131,7 @@ static Elf32_Sym * __init find_symbol(struct elfinfo *lib, const char *symname) if (lib->dynsym[i].st_name == 0) continue; - strlcpy(name, lib->dynstr + lib->dynsym[i].st_name, + strscpy(name, lib->dynstr + lib->dynsym[i].st_name, MAX_SYMNAME); c = strchr(name, '@'); if (c) @@ -182,6 +171,7 @@ static void __init patch_vdso(void *ehdr) if (!cntvct_ok) { vdso_nullpatch_one(&einfo, "__vdso_gettimeofday"); vdso_nullpatch_one(&einfo, "__vdso_clock_gettime"); + vdso_nullpatch_one(&einfo, "__vdso_clock_gettime64"); } } @@ -238,7 +228,7 @@ static int install_vvar(struct mm_struct *mm, unsigned long addr) return PTR_ERR_OR_ZERO(vma); } -/* assumes mmap_sem is write-locked */ +/* assumes mmap_lock is write-locked */ void arm_install_vdso(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma; diff --git a/arch/arm/kernel/vmcore_info.c b/arch/arm/kernel/vmcore_info.c new file mode 100644 index 000000000000..1437aba47787 --- /dev/null +++ b/arch/arm/kernel/vmcore_info.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/vmcore_info.h> + +void arch_crash_save_vmcoreinfo(void) +{ +#ifdef CONFIG_ARM_LPAE + VMCOREINFO_CONFIG(ARM_LPAE); +#endif +} diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S index 21b8b271c80d..c16d196b5aad 100644 --- a/arch/arm/kernel/vmlinux-xip.lds.S +++ b/arch/arm/kernel/vmlinux-xip.lds.S @@ -9,14 +9,11 @@ #include <linux/sizes.h> -#include <asm-generic/vmlinux.lds.h> +#include <asm/vmlinux.lds.h> #include <asm/cache.h> #include <asm/thread_info.h> -#include <asm/memory.h> -#include <asm/mpu.h> #include <asm/page.h> - -#include "vmlinux.lds.h" +#include <asm/mpu.h> OUTPUT_ARCH(arm) ENTRY(stext) @@ -42,6 +39,10 @@ SECTIONS ARM_DISCARD *(.alt.smp.init) *(.pv_table) +#ifndef CONFIG_ARM_UNWIND + *(.ARM.exidx) *(.ARM.exidx.*) + *(.ARM.extab) *(.ARM.extab.*) +#endif } . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR); @@ -152,6 +153,10 @@ SECTIONS _end = .; STABS_DEBUG + DWARF_DEBUG + ARM_DETAILS + + ARM_ASSERTS } /* @@ -160,15 +165,9 @@ SECTIONS * binutils is too old (for other reasons as well) */ ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support") +#ifndef CONFIG_COMPILE_TEST ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined") - -/* - * The HYP init code can't be more than a page long, - * and should not cross a page boundary. - * The above comment applies as well. 
- */ -ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE, - "HYP init code too big or misaligned") +#endif #ifdef CONFIG_XIP_DEFLATED_DATA /* @@ -178,7 +177,7 @@ ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE, ASSERT((_end - __bss_start) >= 12288, ".bss too small for CONFIG_XIP_DEFLATED_DATA") #endif -#ifdef CONFIG_ARM_MPU +#if defined(CONFIG_ARM_MPU) && !defined(CONFIG_COMPILE_TEST) /* * Due to PMSAv7 restriction on base address and size we have to * enforce minimal alignment restrictions. It was seen that weaker diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 319ccb10846a..bd9127c4b451 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -8,15 +8,12 @@ #include "vmlinux-xip.lds.S" #else -#include <asm-generic/vmlinux.lds.h> +#include <linux/pgtable.h> +#include <asm/vmlinux.lds.h> #include <asm/cache.h> #include <asm/thread_info.h> -#include <asm/memory.h> -#include <asm/mpu.h> #include <asm/page.h> -#include <asm/pgtable.h> - -#include "vmlinux.lds.h" +#include <asm/mpu.h> OUTPUT_ARCH(arm) ENTRY(stext) @@ -43,9 +40,13 @@ SECTIONS #ifndef CONFIG_SMP_ON_UP *(.alt.smp.init) #endif +#ifndef CONFIG_ARM_UNWIND + *(.ARM.exidx) *(.ARM.exidx.*) + *(.ARM.extab) *(.ARM.extab.*) +#endif } - . = PAGE_OFFSET + TEXT_OFFSET; + . = KERNEL_OFFSET + TEXT_OFFSET; .head.text : { _text = .; HEAD_TEXT @@ -136,12 +137,12 @@ SECTIONS #ifdef CONFIG_STRICT_KERNEL_RWX . = ALIGN(1<<SECTION_SHIFT); #else - . = ALIGN(THREAD_SIZE); + . = ALIGN(THREAD_ALIGN); #endif __init_end = .; _sdata = .; - RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN) _edata = .; BSS_SECTION(0, 0, 0) @@ -151,6 +152,10 @@ SECTIONS _end = .; STABS_DEBUG + DWARF_DEBUG + ARM_DETAILS + + ARM_ASSERTS } #ifdef CONFIG_STRICT_KERNEL_RWX @@ -168,14 +173,8 @@ __start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT); * binutils is too old (for other reasons as well) */ ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support") +#ifndef CONFIG_COMPILE_TEST ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined") - -/* - * The HYP init code can't be more than a page long, - * and should not cross a page boundary. - * The above comment applies as well. - */ -ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE, - "HYP init code too big or misaligned") +#endif #endif /* CONFIG_XIP_KERNEL */ diff --git a/arch/arm/kernel/vmlinux.lds.h b/arch/arm/kernel/vmlinux.lds.h deleted file mode 100644 index 8247bc15addc..000000000000 --- a/arch/arm/kernel/vmlinux.lds.h +++ /dev/null @@ -1,137 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -#ifdef CONFIG_HOTPLUG_CPU -#define ARM_CPU_DISCARD(x) -#define ARM_CPU_KEEP(x) x -#else -#define ARM_CPU_DISCARD(x) x -#define ARM_CPU_KEEP(x) -#endif - -#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \ - defined(CONFIG_GENERIC_BUG) || defined(CONFIG_JUMP_LABEL) -#define ARM_EXIT_KEEP(x) x -#define ARM_EXIT_DISCARD(x) -#else -#define ARM_EXIT_KEEP(x) -#define ARM_EXIT_DISCARD(x) x -#endif - -#ifdef CONFIG_MMU -#define ARM_MMU_KEEP(x) x -#define ARM_MMU_DISCARD(x) -#else -#define ARM_MMU_KEEP(x) -#define ARM_MMU_DISCARD(x) x -#endif - -#define PROC_INFO \ - . 
= ALIGN(4); \ - __proc_info_begin = .; \ - *(.proc.info.init) \ - __proc_info_end = .; - -#define HYPERVISOR_TEXT \ - __hyp_text_start = .; \ - *(.hyp.text) \ - __hyp_text_end = .; - -#define IDMAP_TEXT \ - ALIGN_FUNCTION(); \ - __idmap_text_start = .; \ - *(.idmap.text) \ - __idmap_text_end = .; \ - . = ALIGN(PAGE_SIZE); \ - __hyp_idmap_text_start = .; \ - *(.hyp.idmap.text) \ - __hyp_idmap_text_end = .; - -#define ARM_DISCARD \ - *(.ARM.exidx.exit.text) \ - *(.ARM.extab.exit.text) \ - *(.ARM.exidx.text.exit) \ - *(.ARM.extab.text.exit) \ - ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) \ - ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) \ - ARM_EXIT_DISCARD(EXIT_TEXT) \ - ARM_EXIT_DISCARD(EXIT_DATA) \ - EXIT_CALL \ - ARM_MMU_DISCARD(*(.text.fixup)) \ - ARM_MMU_DISCARD(*(__ex_table)) \ - *(.discard) \ - *(.discard.*) - -#define ARM_TEXT \ - IDMAP_TEXT \ - __entry_text_start = .; \ - *(.entry.text) \ - __entry_text_end = .; \ - IRQENTRY_TEXT \ - SOFTIRQENTRY_TEXT \ - TEXT_TEXT \ - SCHED_TEXT \ - CPUIDLE_TEXT \ - LOCK_TEXT \ - HYPERVISOR_TEXT \ - KPROBES_TEXT \ - *(.gnu.warning) \ - *(.glue_7) \ - *(.glue_7t) \ - . = ALIGN(4); \ - *(.got) /* Global offset table */ \ - ARM_CPU_KEEP(PROC_INFO) - -/* Stack unwinding tables */ -#define ARM_UNWIND_SECTIONS \ - . = ALIGN(8); \ - .ARM.unwind_idx : { \ - __start_unwind_idx = .; \ - *(.ARM.exidx*) \ - __stop_unwind_idx = .; \ - } \ - .ARM.unwind_tab : { \ - __start_unwind_tab = .; \ - *(.ARM.extab*) \ - __stop_unwind_tab = .; \ - } - -/* - * The vectors and stubs are relocatable code, and the - * only thing that matters is their relative offsets - */ -#define ARM_VECTORS \ - __vectors_start = .; \ - .vectors 0xffff0000 : AT(__vectors_start) { \ - *(.vectors) \ - } \ - . = __vectors_start + SIZEOF(.vectors); \ - __vectors_end = .; \ - \ - __stubs_start = .; \ - .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) { \ - *(.stubs) \ - } \ - . = __stubs_start + SIZEOF(.stubs); \ - __stubs_end = .; \ - \ - PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors)); - -#define ARM_TCM \ - __itcm_start = ALIGN(4); \ - .text_itcm ITCM_OFFSET : AT(__itcm_start - LOAD_OFFSET) { \ - __sitcm_text = .; \ - *(.tcm.text) \ - *(.tcm.rodata) \ - . = ALIGN(4); \ - __eitcm_text = .; \ - } \ - . = __itcm_start + SIZEOF(.text_itcm); \ - \ - __dtcm_start = .; \ - .data_dtcm DTCM_OFFSET : AT(__dtcm_start - LOAD_OFFSET) { \ - __sdtcm_data = .; \ - *(.tcm.data) \ - . = ALIGN(4); \ - __edtcm_data = .; \ - } \ - . = __dtcm_start + SIZEOF(.data_dtcm); diff --git a/arch/arm/kernel/xscale-cp0.c b/arch/arm/kernel/xscale-cp0.c index ed4f6e77616d..00d00d3aae97 100644 --- a/arch/arm/kernel/xscale-cp0.c +++ b/arch/arm/kernel/xscale-cp0.c @@ -166,6 +166,7 @@ static int __init xscale_cp0_init(void) pr_info("XScale iWMMXt coprocessor detected.\n"); elf_hwcap |= HWCAP_IWMMXT; thread_register_notifier(&iwmmxt_notifier_block); + register_iwmmxt_undef_handler(); #endif } else { pr_info("XScale DSP coprocessor detected.\n");