Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--   arch/arm/kernel/Makefile          |   1
-rw-r--r--   arch/arm/kernel/arthur.c          |  94
-rw-r--r--   arch/arm/kernel/asm-offsets.c     |   5
-rw-r--r--   arch/arm/kernel/cpuidle.c         | 133
-rw-r--r--   arch/arm/kernel/hw_breakpoint.c   |   2
-rw-r--r--   arch/arm/kernel/signal.c          |  13
-rw-r--r--   arch/arm/kernel/time.c            |   6
-rw-r--r--   arch/arm/kernel/traps.c           |   6
-rw-r--r--   arch/arm/kernel/vdso.c            |  12
-rw-r--r--   arch/arm/kernel/vmlinux.lds.S     |  11
10 files changed, 152 insertions, 131 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index ba5f83226011..752725dcbf42 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -34,7 +34,6 @@ obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o
 obj-$(CONFIG_ISA_DMA_API)	+= dma.o
 obj-$(CONFIG_FIQ)		+= fiq.o fiqasm.o
 obj-$(CONFIG_MODULES)		+= armksyms.o module.o
-obj-$(CONFIG_ARTHUR)		+= arthur.o
 obj-$(CONFIG_ISA_DMA)		+= dma-isa.o
 obj-$(CONFIG_PCI)		+= bios32.o isa.o
 obj-$(CONFIG_ARM_CPU_SUSPEND)	+= sleep.o suspend.o
diff --git a/arch/arm/kernel/arthur.c b/arch/arm/kernel/arthur.c
deleted file mode 100644
index 321c5291d05f..000000000000
--- a/arch/arm/kernel/arthur.c
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * linux/arch/arm/kernel/arthur.c
- *
- * Copyright (C) 1998, 1999, 2000, 2001 Philip Blundell
- *
- * Arthur personality
- */
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/personality.h>
-#include <linux/stddef.h>
-#include <linux/signal.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include <asm/ptrace.h>
-
-/* Arthur doesn't have many signals, and a lot of those that it does
-   have don't map easily to any Linux equivalent.  Never mind.  */
-
-#define ARTHUR_SIGABRT		1
-#define ARTHUR_SIGFPE		2
-#define ARTHUR_SIGILL		3
-#define ARTHUR_SIGINT		4
-#define ARTHUR_SIGSEGV		5
-#define ARTHUR_SIGTERM		6
-#define ARTHUR_SIGSTAK		7
-#define ARTHUR_SIGUSR1		8
-#define ARTHUR_SIGUSR2		9
-#define ARTHUR_SIGOSERROR	10
-
-static unsigned long arthur_to_linux_signals[32] = {
-	0,  1,  2,  3,  4,  5,  6,  7,
-	8,  9,  10, 11, 12, 13, 14, 15,
-	16, 17, 18, 19, 20, 21, 22, 23,
-	24, 25, 26, 27, 28, 29, 30, 31
-};
-
-static unsigned long linux_to_arthur_signals[32] = {
-	0,              -1,             ARTHUR_SIGINT,  -1,
-	ARTHUR_SIGILL,  5,              ARTHUR_SIGABRT, 7,
-	ARTHUR_SIGFPE,  9,              ARTHUR_SIGUSR1, ARTHUR_SIGSEGV,
-	ARTHUR_SIGUSR2, 13,             14,             ARTHUR_SIGTERM,
-	16, 17, 18, 19,
-	20, 21, 22, 23,
-	24, 25, 26, 27,
-	28, 29, 30, 31
-};
-
-static void arthur_lcall7(int nr, struct pt_regs *regs)
-{
-	struct siginfo info;
-	info.si_signo = SIGSWI;
-	info.si_errno = nr;
-	/* Bounce it to the emulator */
-	send_sig_info(SIGSWI, &info, current);
-}
-
-static struct exec_domain arthur_exec_domain = {
-	.name		= "Arthur",
-	.handler	= arthur_lcall7,
-	.pers_low	= PER_RISCOS,
-	.pers_high	= PER_RISCOS,
-	.signal_map	= arthur_to_linux_signals,
-	.signal_invmap	= linux_to_arthur_signals,
-	.module		= THIS_MODULE,
-};
-
-/*
- * We could do with some locking to stop Arthur being removed while
- * processes are using it.
- */
-
-static int __init arthur_init(void)
-{
-	return register_exec_domain(&arthur_exec_domain);
-}
-
-static void __exit arthur_exit(void)
-{
-	unregister_exec_domain(&arthur_exec_domain);
-}
-
-module_init(arthur_init);
-module_exit(arthur_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 9147008f0d51..871b8267d211 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -67,7 +67,6 @@ int main(void)
   DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
   DEFINE(TI_ADDR_LIMIT,		offsetof(struct thread_info, addr_limit));
   DEFINE(TI_TASK,		offsetof(struct thread_info, task));
-  DEFINE(TI_EXEC_DOMAIN,	offsetof(struct thread_info, exec_domain));
   DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
   DEFINE(TI_CPU_DOMAIN,		offsetof(struct thread_info, cpu_domain));
   DEFINE(TI_CPU_SAVE,		offsetof(struct thread_info, cpu_context));
@@ -191,7 +190,6 @@ int main(void)
   DEFINE(VCPU_HxFAR,		offsetof(struct kvm_vcpu, arch.fault.hxfar));
   DEFINE(VCPU_HPFAR,		offsetof(struct kvm_vcpu, arch.fault.hpfar));
   DEFINE(VCPU_HYP_PC,		offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
-#ifdef CONFIG_KVM_ARM_VGIC
   DEFINE(VCPU_VGIC_CPU,		offsetof(struct kvm_vcpu, arch.vgic_cpu));
   DEFINE(VGIC_V2_CPU_HCR,	offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
   DEFINE(VGIC_V2_CPU_VMCR,	offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
@@ -201,14 +199,11 @@ int main(void)
   DEFINE(VGIC_V2_CPU_APR,	offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
   DEFINE(VGIC_V2_CPU_LR,	offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
   DEFINE(VGIC_CPU_NR_LR,	offsetof(struct vgic_cpu, nr_lr));
-#ifdef CONFIG_KVM_ARM_TIMER
   DEFINE(VCPU_TIMER_CNTV_CTL,	offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
   DEFINE(VCPU_TIMER_CNTV_CVAL,	offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
   DEFINE(KVM_TIMER_CNTVOFF,	offsetof(struct kvm, arch.timer.cntvoff));
   DEFINE(KVM_TIMER_ENABLED,	offsetof(struct kvm, arch.timer.enabled));
-#endif
   DEFINE(KVM_VGIC_VCTRL,	offsetof(struct kvm, arch.vgic.vctrl_base));
-#endif
   DEFINE(KVM_VTTBR,		offsetof(struct kvm, arch.vttbr));
 #endif
   BLANK();
diff --git a/arch/arm/kernel/cpuidle.c b/arch/arm/kernel/cpuidle.c
index 89545f6c8403..318da33465f4 100644
--- a/arch/arm/kernel/cpuidle.c
+++ b/arch/arm/kernel/cpuidle.c
@@ -10,8 +10,28 @@
  */
 
 #include <linux/cpuidle.h>
-#include <asm/proc-fns.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <asm/cpuidle.h>
 
+extern struct of_cpuidle_method __cpuidle_method_of_table[];
+
+static const struct of_cpuidle_method __cpuidle_method_of_table_sentinel
+	__used __section(__cpuidle_method_of_table_end);
+
+static struct cpuidle_ops cpuidle_ops[NR_CPUS];
+
+/**
+ * arm_cpuidle_simple_enter() - a wrapper to cpu_do_idle()
+ * @dev: not used
+ * @drv: not used
+ * @index: not used
+ *
+ * A trivial wrapper to allow the cpu_do_idle function to be assigned as a
+ * cpuidle callback by matching the function signature.
+ *
+ * Returns the index passed as parameter
+ */
 int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int index)
 {
@@ -19,3 +39,114 @@ int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
 
 	return index;
 }
+
+/**
+ * arm_cpuidle_suspend() - function to enter low power idle states
+ * @index: an integer used as an identifier for the low level PM callbacks
+ *
+ * This function calls the underlying arch specific low level PM code as
+ * registered at the init time.
+ *
+ * Returns -EOPNOTSUPP if no suspend callback is defined, the result of the
+ * callback otherwise.
+ */
+int arm_cpuidle_suspend(int index)
+{
+	int ret = -EOPNOTSUPP;
+	int cpu = smp_processor_id();
+
+	if (cpuidle_ops[cpu].suspend)
+		ret = cpuidle_ops[cpu].suspend(cpu, index);
+
+	return ret;
+}
+
+/**
+ * arm_cpuidle_get_ops() - find a registered cpuidle_ops by name
+ * @method: the method name
+ *
+ * Search in the __cpuidle_method_of_table array the cpuidle ops matching the
+ * method name.
+ *
+ * Returns a struct cpuidle_ops pointer, NULL if not found.
+ */
+static struct cpuidle_ops *__init arm_cpuidle_get_ops(const char *method)
+{
+	struct of_cpuidle_method *m = __cpuidle_method_of_table;
+
+	for (; m->method; m++)
+		if (!strcmp(m->method, method))
+			return m->ops;
+
+	return NULL;
+}
+
+/**
+ * arm_cpuidle_read_ops() - Initialize the cpuidle ops with the device tree
+ * @dn: a pointer to a struct device node corresponding to a cpu node
+ * @cpu: the cpu identifier
+ *
+ * Get the method name defined in the 'enable-method' property, retrieve the
+ * associated cpuidle_ops and do a struct copy. This copy is needed because all
+ * cpuidle_ops are tagged __initdata and will be unloaded after the init
+ * process.
+ *
+ * Return 0 on sucess, -ENOENT if no 'enable-method' is defined, -EOPNOTSUPP if
+ * no cpuidle_ops is registered for the 'enable-method'.
+ */
+static int __init arm_cpuidle_read_ops(struct device_node *dn, int cpu)
+{
+	const char *enable_method;
+	struct cpuidle_ops *ops;
+
+	enable_method = of_get_property(dn, "enable-method", NULL);
+	if (!enable_method)
+		return -ENOENT;
+
+	ops = arm_cpuidle_get_ops(enable_method);
+	if (!ops) {
+		pr_warn("%s: unsupported enable-method property: %s\n",
+			dn->full_name, enable_method);
+		return -EOPNOTSUPP;
+	}
+
+	cpuidle_ops[cpu] = *ops; /* structure copy */
+
+	pr_notice("cpuidle: enable-method property '%s'"
+		  " found operations\n", enable_method);
+
+	return 0;
+}
+
+/**
+ * arm_cpuidle_init() - Initialize cpuidle_ops for a specific cpu
+ * @cpu: the cpu to be initialized
+ *
+ * Initialize the cpuidle ops with the device for the cpu and then call
+ * the cpu's idle initialization callback. This may fail if the underlying HW
+ * is not operational.
+ *
+ * Returns:
+ *  0 on success,
+ *  -ENODEV if it fails to find the cpu node in the device tree,
+ *  -EOPNOTSUPP if it does not find a registered cpuidle_ops for this cpu,
+ *  -ENOENT if it fails to find an 'enable-method' property,
+ *  -ENXIO if the HW reports a failure or a misconfiguration,
+ *  -ENOMEM if the HW report an memory allocation failure
+ */
+int __init arm_cpuidle_init(int cpu)
+{
+	struct device_node *cpu_node = of_cpu_device_node_get(cpu);
+	int ret;
+
+	if (!cpu_node)
+		return -ENODEV;
+
+	ret = arm_cpuidle_read_ops(cpu_node, cpu);
+	if (!ret && cpuidle_ops[cpu].init)
+		ret = cpuidle_ops[cpu].init(cpu_node, cpu);
+
+	of_node_put(cpu_node);
+
+	return ret;
+}
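For reference, a platform back-end would hook into the cpuidle_ops table introduced above roughly as follows. This is a minimal sketch: the CPUIDLE_METHOD_OF_DECLARE() registration macro and the authoritative cpuidle_ops prototypes live in asm/cpuidle.h, which is not part of this diff, so the callback signatures here are inferred from the call sites in arm_cpuidle_init() and arm_cpuidle_suspend(), and the foo_* names and "vendor,foo-cpuidle" method string are hypothetical.

/*
 * Illustrative sketch only; see asm/cpuidle.h for the real prototypes.
 */
#include <linux/init.h>
#include <linux/of.h>

#include <asm/cpuidle.h>

/* Called once per cpu from arm_cpuidle_init(); probe and prepare the
 * platform's low level PM interface here.  A non-zero return makes the
 * init fail for this cpu. */
static int __init foo_cpuidle_init(struct device_node *cpu_node, int cpu)
{
	return 0;
}

/* Called from arm_cpuidle_suspend(); 'index' identifies the idle state
 * selected by the cpuidle governor. */
static int foo_cpuidle_suspend(int cpu, int index)
{
	return 0;
}

/* Tagged __initdata, as the kernel-doc above notes: arm_cpuidle_read_ops()
 * copies the structure into the per-cpu cpuidle_ops[] array. */
static struct cpuidle_ops foo_cpuidle_ops __initdata = {
	.init		= foo_cpuidle_init,
	.suspend	= foo_cpuidle_suspend,
};

/*
 * Matched against the cpu node's "enable-method" property, e.g.:
 *
 *	cpu@0 {
 *		compatible = "arm,cortex-a15";
 *		enable-method = "vendor,foo-cpuidle";
 *	};
 */
CPUIDLE_METHOD_OF_DECLARE(foo_cpuidle, "vendor,foo-cpuidle", &foo_cpuidle_ops);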
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 7fc70ae21185..dc7d0a95bd36 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -648,7 +648,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 	 * Per-cpu breakpoints are not supported by our stepping
 	 * mechanism.
 	 */
-	if (!bp->hw.bp_target)
+	if (!bp->hw.target)
 		return -EINVAL;
 
 	/*
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 023ac905e4c3..423663e23791 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -318,17 +318,6 @@ get_sigframe(struct ksignal *ksig, struct pt_regs *regs, int framesize)
 	return frame;
 }
 
-/*
- * translate the signal
- */
-static inline int map_sig(int sig)
-{
-	struct thread_info *thread = current_thread_info();
-	if (sig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
-		sig = thread->exec_domain->signal_invmap[sig];
-	return sig;
-}
-
 static int
 setup_return(struct pt_regs *regs, struct ksignal *ksig,
 	     unsigned long __user *rc, void __user *frame)
@@ -412,7 +401,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
 		}
 	}
 
-	regs->ARM_r0 = map_sig(ksig->sig);
+	regs->ARM_r0 = ksig->sig;
 	regs->ARM_sp = (unsigned long)frame;
 	regs->ARM_lr = retcode;
 	regs->ARM_pc = handler;
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 0cc7e58c47cc..a66e37e211a9 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -76,7 +76,7 @@ void timer_tick(void)
 }
 #endif
 
-static void dummy_clock_access(struct timespec *ts)
+static void dummy_clock_access(struct timespec64 *ts)
 {
 	ts->tv_sec = 0;
 	ts->tv_nsec = 0;
@@ -85,12 +85,12 @@ static void dummy_clock_access(struct timespec *ts)
 static clock_access_fn __read_persistent_clock = dummy_clock_access;
 static clock_access_fn __read_boot_clock = dummy_clock_access;;
 
-void read_persistent_clock(struct timespec *ts)
+void read_persistent_clock64(struct timespec64 *ts)
 {
 	__read_persistent_clock(ts);
 }
 
-void read_boot_clock(struct timespec *ts)
+void read_boot_clock64(struct timespec64 *ts)
 {
 	__read_boot_clock(ts);
 }
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 788e23fe64d8..3dce1a342030 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -505,12 +505,10 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason)
 
 static int bad_syscall(int n, struct pt_regs *regs)
 {
-	struct thread_info *thread = current_thread_info();
 	siginfo_t info;
 
-	if ((current->personality & PER_MASK) != PER_LINUX &&
-	    thread->exec_domain->handler) {
-		thread->exec_domain->handler(n, regs);
+	if ((current->personality & PER_MASK) != PER_LINUX) {
+		send_sig(SIGSEGV, current, 1);
 		return regs->ARM_r0;
 	}
 
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index 0d31d3ccab81..efe17dd9b921 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -270,7 +270,7 @@ static bool tk_is_cntvct(const struct timekeeper *tk)
 	if (!IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
 		return false;
 
-	if (strcmp(tk->tkr.clock->name, "arch_sys_counter") != 0)
+	if (strcmp(tk->tkr_mono.clock->name, "arch_sys_counter") != 0)
 		return false;
 
 	return true;
@@ -316,12 +316,12 @@ void update_vsyscall(struct timekeeper *tk)
 	vdso_data->wtm_clock_nsec	= wtm->tv_nsec;
 
 	if (vdso_data->tk_is_cntvct) {
-		vdso_data->cs_cycle_last	= tk->tkr.cycle_last;
+		vdso_data->cs_cycle_last	= tk->tkr_mono.cycle_last;
 		vdso_data->xtime_clock_sec	= tk->xtime_sec;
-		vdso_data->xtime_clock_snsec	= tk->tkr.xtime_nsec;
-		vdso_data->cs_mult		= tk->tkr.mult;
-		vdso_data->cs_shift		= tk->tkr.shift;
-		vdso_data->cs_mask		= tk->tkr.mask;
+		vdso_data->xtime_clock_snsec	= tk->tkr_mono.xtime_nsec;
+		vdso_data->cs_mult		= tk->tkr_mono.mult;
+		vdso_data->cs_shift		= tk->tkr_mono.shift;
+		vdso_data->cs_mask		= tk->tkr_mono.mask;
 	}
 
 	vdso_write_end(vdso_data);
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 7a301be9ac67..8b60fde5ce48 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -11,7 +11,7 @@
 #ifdef CONFIG_ARM_KERNMEM_PERMS
 #include <asm/pgtable.h>
 #endif
- 
+
 #define PROC_INFO						\
 	. = ALIGN(4);						\
 	VMLINUX_SYMBOL(__proc_info_begin) = .;			\
@@ -23,7 +23,7 @@
 	VMLINUX_SYMBOL(__idmap_text_start) = .;			\
 	*(.idmap.text)						\
 	VMLINUX_SYMBOL(__idmap_text_end) = .;			\
-	. = ALIGN(32);						\
+	. = ALIGN(PAGE_SIZE);					\
 	VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;		\
 	*(.hyp.idmap.text)					\
 	VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
@@ -343,8 +343,11 @@ SECTIONS
  */
 ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
 ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
+
 /*
- * The HYP init code can't be more than a page long.
+ * The HYP init code can't be more than a page long,
+ * and should not cross a page boundary.
  * The above comment applies as well.
  */
-ASSERT(((__hyp_idmap_text_end - __hyp_idmap_text_start) <= PAGE_SIZE), "HYP init code too big")
+ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
+	"HYP init code too big or misaligned")
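The reworked ASSERT checks containment within a single page rather than just total size: with __hyp_idmap_text_start now aligned to PAGE_SIZE, masking the start and comparing the end against one page rejects a section that either grows past a page or ends up straddling a boundary. A minimal user-space sketch of the same condition, assuming 4 KiB pages; the helper name and example addresses are made up for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Same condition as the ASSERT above: the region fits in one page only if
 * its end does not pass the end of the page containing its start. */
static bool fits_in_one_page(uintptr_t start, uintptr_t end)
{
	return end - (start & PAGE_MASK) <= PAGE_SIZE;
}

int main(void)
{
	/* 0x100 bytes starting at a page-aligned address: fits. */
	printf("%d\n", fits_in_one_page(0xc0012000, 0xc0012100));	/* 1 */

	/* The same 0x100 bytes straddling a page boundary: the old
	 * size-only check would have passed, the new check rejects it. */
	printf("%d\n", fits_in_one_page(0xc0012f80, 0xc0013080));	/* 0 */

	return 0;
}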