Diffstat (limited to 'arch/arm/kernel/smp.c')
| -rw-r--r-- | arch/arm/kernel/smp.c | 40 |
1 file changed, 19 insertions, 21 deletions
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 36e6efad89f3..50999886a8b5 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -48,7 +48,6 @@
 #include <asm/mach/arch.h>
 #include <asm/mpu.h>
 
-#define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>
 
 /*
@@ -289,15 +288,11 @@ int __cpu_disable(void)
 }
 
 /*
- * called on the thread which is asking for a CPU to be shutdown -
- * waits until shutdown has completed, or it is timed out.
+ * called on the thread which is asking for a CPU to be shutdown after the
+ * shutdown completed.
  */
-void __cpu_die(unsigned int cpu)
+void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
 {
-	if (!cpu_wait_death(cpu, 5)) {
-		pr_err("CPU%u: cpu didn't die\n", cpu);
-		return;
-	}
 	pr_debug("CPU%u: shutdown\n", cpu);
 
 	clear_tasks_mm_cpumask(cpu);
@@ -320,7 +315,7 @@ void __cpu_die(unsigned int cpu)
  * of the other hotplug-cpu capable cores, so presumably coming
  * out of idle fixes this.
  */
-void arch_cpu_idle_dead(void)
+void __noreturn arch_cpu_idle_dead(void)
 {
 	unsigned int cpu = smp_processor_id();
 
@@ -337,11 +332,11 @@ void arch_cpu_idle_dead(void)
 	flush_cache_louis();
 
 	/*
-	 * Tell __cpu_die() that this CPU is now safe to dispose of. Once
-	 * this returns, power and/or clocks can be removed at any point
-	 * from this CPU and its cache by platform_cpu_kill().
+	 * Tell cpuhp_bp_sync_dead() that this CPU is now safe to dispose
+	 * of. Once this returns, power and/or clocks can be removed at
+	 * any point from this CPU and its cache by platform_cpu_kill().
 	 */
-	(void)cpu_report_death();
+	cpuhp_ap_report_dead();
 
 	/*
 	 * Ensure that the cache lines associated with that completion are
@@ -382,6 +377,8 @@ void arch_cpu_idle_dead(void)
 		: "r" (task_stack_page(current) + THREAD_SIZE - 8),
 		  "r" (current)
 		: "r0");
+
+	unreachable();
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
@@ -554,7 +551,8 @@ void show_ipi_list(struct seq_file *p, int prec)
 		if (!ipi_desc[i])
 			continue;
 
-		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
+		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
+			   prec >= 4 ? " " : "");
 
 		for_each_online_cpu(cpu)
 			seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
@@ -638,7 +636,7 @@ static void do_handle_IPI(int ipinr)
 	unsigned int cpu = smp_processor_id();
 
 	if ((unsigned)ipinr < NR_IPI)
-		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
+		trace_ipi_entry(ipi_types[ipinr]);
 
 	switch (ipinr) {
 	case IPI_WAKEUP:
@@ -685,7 +683,7 @@ static void do_handle_IPI(int ipinr)
 	}
 
 	if ((unsigned)ipinr < NR_IPI)
-		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+		trace_ipi_exit(ipi_types[ipinr]);
 }
 
 /* Legacy version, should go away once all irqchips have been converted */
@@ -708,7 +706,7 @@ static irqreturn_t ipi_handler(int irq, void *data)
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
 {
-	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
+	trace_ipi_raise(target, ipi_types[ipinr]);
 	__ipi_send_mask(ipi_desc[ipinr], target);
 }
 
@@ -747,7 +745,7 @@ void __init set_smp_ipi_range(int ipi_base, int n)
 	ipi_setup(smp_processor_id());
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
@@ -777,7 +775,7 @@ void smp_send_stop(void)
  * kdump fails. So split out the panic_smp_self_stop() and add
  * set_cpu_online(smp_processor_id(), false).
  */
-void panic_smp_self_stop(void)
+void __noreturn panic_smp_self_stop(void)
 {
 	pr_debug("CPU %u will stop doing anything useful since another CPU has paniced\n",
 		 smp_processor_id());
@@ -849,7 +847,7 @@ static void raise_nmi(cpumask_t *mask)
 	__ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
 }
 
-void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
 {
-	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_nmi);
+	nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_nmi);
 }
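
The largest change above is the CPU-hotplug teardown: __cpu_die() used to poll with cpu_wait_death() for up to five seconds, whereas the new arch_cpuhp_cleanup_dead_cpu() only runs after the core hotplug code (cpuhp_bp_sync_dead(), per the updated comment) has already seen the dying CPU call cpuhp_ap_report_dead(). The following is a minimal user-space sketch of that report-then-cleanup ordering using pthreads and C11 atomics; it is an analogy only, and every name in it is a stand-in rather than real kernel code.

/*
 * Hypothetical user-space analogy of the new hotplug handshake:
 * the "dying CPU" thread reports itself dead, and the controlling
 * thread runs its cleanup only after seeing that report.
 * Build with: cc -pthread handshake.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int dead;			/* stand-in for the cpuhp sync state */

static void *dying_cpu(void *arg)	/* stand-in for arch_cpu_idle_dead() */
{
	(void)arg;
	printf("AP: caches flushed, reporting dead\n");
	atomic_store_explicit(&dead, 1, memory_order_release);
	/* the real CPU would now spin/WFI until power is cut */
	return NULL;
}

int main(void)
{
	pthread_t ap;

	pthread_create(&ap, NULL, dying_cpu, NULL);

	/* stand-in for cpuhp_bp_sync_dead(): wait for the report */
	while (!atomic_load_explicit(&dead, memory_order_acquire))
		;

	/* stand-in for arch_cpuhp_cleanup_dead_cpu() + platform_cpu_kill() */
	printf("BP: CPU reported dead, cleaning up and cutting power\n");

	pthread_join(ap, NULL);
	return 0;
}

The point the patch relies on is this ordering: by the time the cleanup side runs, the report has already happened, so the timeout and error path of the old __cpu_die() are no longer needed in the architecture code.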

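The show_ipi_list() hunk is easy to miss: the trailing space after the colon is now emitted only when prec >= 4, which changes the width of the "IPIn:" label for small prec values, presumably to keep the IPI rows aligned with the other lines in /proc/interrupts. The snippet below is a hedged, stand-alone illustration of that format string; the prec range and loop are made up for demonstration.

/* Stand-alone demo of the "%*s%u:%s" format used by show_ipi_list(). */
#include <stdio.h>

int main(void)
{
	for (int prec = 2; prec <= 5; prec++) {
		unsigned int i = 0;	/* IPI number, arbitrary */

		/* old format: unconditional trailing space */
		printf("prec=%d old |%*s%u: |\n", prec, prec - 1, "IPI", i);
		/* new format: trailing space only when prec >= 4 */
		printf("prec=%d new |%*s%u:%s|\n", prec, prec - 1, "IPI", i,
		       prec >= 4 ? " " : "");
	}
	return 0;
}

With prec = 3 the old form prints "IPI0: " (six characters) while the new form prints "IPI0:" (five); treat any comparison with the width of ordinary interrupt lines as an assumption about the surrounding /proc code rather than something shown in this diff.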