Diffstat (limited to 'arch/arm/kernel/smp.c')
-rw-r--r--	arch/arm/kernel/smp.c	78
1 file changed, 37 insertions(+), 41 deletions(-)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index c7bb168b0d97..50999886a8b5 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -48,7 +48,6 @@
 #include <asm/mach/arch.h>
 #include <asm/mpu.h>
 
-#define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>
 
 /*
@@ -153,6 +152,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 	secondary_data.pgdir = virt_to_phys(idmap_pgd);
 	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
 #endif
+	secondary_data.task = idle;
 	sync_cache_w(&secondary_data);
 
 	/*
@@ -288,15 +288,11 @@ int __cpu_disable(void)
 }
 
 /*
- * called on the thread which is asking for a CPU to be shutdown -
- * waits until shutdown has completed, or it is timed out.
+ * called on the thread which is asking for a CPU to be shutdown after the
+ * shutdown completed.
  */
-void __cpu_die(unsigned int cpu)
+void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
 {
-	if (!cpu_wait_death(cpu, 5)) {
-		pr_err("CPU%u: cpu didn't die\n", cpu);
-		return;
-	}
 	pr_debug("CPU%u: shutdown\n", cpu);
 
 	clear_tasks_mm_cpumask(cpu);
@@ -319,7 +315,7 @@ void __cpu_die(unsigned int cpu)
  * of the other hotplug-cpu capable cores, so presumably coming
  * out of idle fixes this.
  */
-void arch_cpu_idle_dead(void)
+void __noreturn arch_cpu_idle_dead(void)
 {
 	unsigned int cpu = smp_processor_id();
 
@@ -336,11 +332,11 @@ void arch_cpu_idle_dead(void)
 	flush_cache_louis();
 
 	/*
-	 * Tell __cpu_die() that this CPU is now safe to dispose of. Once
-	 * this returns, power and/or clocks can be removed at any point
-	 * from this CPU and its cache by platform_cpu_kill().
+	 * Tell cpuhp_bp_sync_dead() that this CPU is now safe to dispose
+	 * of. Once this returns, power and/or clocks can be removed at
+	 * any point from this CPU and its cache by platform_cpu_kill().
 	 */
-	(void)cpu_report_death();
+	cpuhp_ap_report_dead();
 
 	/*
 	 * Ensure that the cache lines associated with that completion are
@@ -375,9 +371,14 @@ void arch_cpu_idle_dead(void)
 	 */
 	__asm__("mov	sp, %0\n"
 	"	mov	fp, #0\n"
+	"	mov	r0, %1\n"
 	"	b	secondary_start_kernel"
 		:
-		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
+		: "r" (task_stack_page(current) + THREAD_SIZE - 8),
+		  "r" (current)
+		: "r0");
+
+	unreachable();
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
@@ -396,15 +397,23 @@ static void smp_store_cpu_info(unsigned int cpuid)
 	check_cpu_icache_size(cpuid);
 }
 
+static void set_current(struct task_struct *cur)
+{
+	/* Set TPIDRURO */
+	asm("mcr p15, 0, %0, c13, c0, 3" :: "r"(cur) : "memory");
+}
+
 /*
  * This is the secondary CPU boot entry.  We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
  */
-asmlinkage void secondary_start_kernel(void)
+asmlinkage void secondary_start_kernel(struct task_struct *task)
 {
 	struct mm_struct *mm = &init_mm;
 	unsigned int cpu;
 
+	set_current(task);
+
 	secondary_biglittle_init();
 
 	/*
@@ -542,7 +551,8 @@ void show_ipi_list(struct seq_file *p, int prec)
 		if (!ipi_desc[i])
 			continue;
 
-		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
+		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
+			   prec >= 4 ? " " : "");
 
 		for_each_online_cpu(cpu)
 			seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
@@ -588,6 +598,8 @@ static DEFINE_RAW_SPINLOCK(stop_lock);
  */
 static void ipi_cpu_stop(unsigned int cpu)
 {
+	local_fiq_disable();
+
 	if (system_state <= SYSTEM_RUNNING) {
 		raw_spin_lock(&stop_lock);
 		pr_crit("CPU%u: stopping\n", cpu);
@@ -597,9 +609,6 @@ static void ipi_cpu_stop(unsigned int cpu)
 
 	set_cpu_online(cpu, false);
 
-	local_fiq_disable();
-	local_irq_disable();
-
 	while (1) {
 		cpu_relax();
 		wfe();
@@ -622,17 +631,12 @@ static void ipi_complete(unsigned int cpu)
 /*
  * Main handler for inter-processor interrupts
  */
-asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
-{
-	handle_IPI(ipinr, regs);
-}
-
 static void do_handle_IPI(int ipinr)
 {
 	unsigned int cpu = smp_processor_id();
 
 	if ((unsigned)ipinr < NR_IPI)
-		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
+		trace_ipi_entry(ipi_types[ipinr]);
 
 	switch (ipinr) {
 	case IPI_WAKEUP:
@@ -667,9 +671,9 @@ static void do_handle_IPI(int ipinr)
 		break;
 
 	case IPI_CPU_BACKTRACE:
-		printk_nmi_enter();
+		printk_deferred_enter();
 		nmi_cpu_backtrace(get_irq_regs());
-		printk_nmi_exit();
+		printk_deferred_exit();
 		break;
 
 	default:
@@ -679,7 +683,7 @@ static void do_handle_IPI(int ipinr)
 	}
 
 	if ((unsigned)ipinr < NR_IPI)
-		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+		trace_ipi_exit(ipi_types[ipinr]);
 }
 
 /* Legacy version, should go away once all irqchips have been converted */
@@ -702,7 +706,7 @@ static irqreturn_t ipi_handler(int irq, void *data)
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
 {
-	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
+	trace_ipi_raise(target, ipi_types[ipinr]);
 	__ipi_send_mask(ipi_desc[ipinr], target);
 }
 
@@ -741,7 +745,7 @@ void __init set_smp_ipi_range(int ipi_base, int n)
 	ipi_setup(smp_processor_id());
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
@@ -771,7 +775,7 @@ void smp_send_stop(void)
  * kdump fails. So split out the panic_smp_self_stop() and add
  * set_cpu_online(smp_processor_id(), false).
  */
-void panic_smp_self_stop(void)
+void __noreturn panic_smp_self_stop(void)
 {
 	pr_debug("CPU %u will stop doing anything useful since another CPU has paniced\n",
 		 smp_processor_id());
@@ -780,14 +784,6 @@ void panic_smp_self_stop(void)
 		cpu_relax();
 }
 
-/*
- * not supported here
- */
-int setup_profiling_timer(unsigned int multiplier)
-{
-	return -EINVAL;
-}
-
 #ifdef CONFIG_CPU_FREQ
 
 static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
@@ -851,7 +847,7 @@ static void raise_nmi(cpumask_t *mask)
 	__ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
 }
 
-void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
 {
-	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_nmi);
+	nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_nmi);
 }
