Diffstat (limited to 'arch/arm/kernel/smp.c')
-rw-r--r--  arch/arm/kernel/smp.c | 556
1 file changed, 332 insertions(+), 224 deletions(-)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index c5fb5469054b..50999886a8b5 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -1,17 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/kernel/smp.c
*
* Copyright (C) 2002 ARM Limited, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
-#include <linux/sched.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/hotplug.h>
+#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
@@ -21,12 +20,16 @@
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
+#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>
+#include <linux/irq_work.h>
+#include <linux/kernel_stat.h>
#include <linux/atomic.h>
+#include <asm/bugs.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
@@ -35,18 +38,18 @@
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
+#include <asm/procinfo.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
-#include <asm/localtimer.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
#include <asm/mpu.h>
+#include <trace/events/ipi.h>
+
/*
* as from 2.5, kernels no longer have an init_tasks structure
* so we need some other way of telling a new secondary core
@@ -54,26 +57,39 @@
*/
struct secondary_data secondary_data;
-/*
- * control for which core is the next to come out of the secondary
- * boot "holding pen"
- */
-volatile int __cpuinitdata pen_release = -1;
-
enum ipi_msg_type {
IPI_WAKEUP,
IPI_TIMER,
IPI_RESCHEDULE,
IPI_CALL_FUNC,
- IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP,
+ IPI_IRQ_WORK,
+ IPI_COMPLETION,
+ NR_IPI,
+ /*
+ * CPU_BACKTRACE is special and not included in NR_IPI
+ * or traceable with trace_ipi_*
+ */
+ IPI_CPU_BACKTRACE = NR_IPI,
+ /*
+ * SGI8-15 can be reserved by secure firmware, and thus may
+ * not be usable by the kernel. Please keep the above limited
+ * to at most 8 entries.
+ */
+ MAX_IPI
};
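/*
 * Illustrative sketch, not part of this patch: the "at most 8 entries"
 * rule above could be checked at build time with a hypothetical guard,
 * since only SGI0-7 are guaranteed to be available to the kernel.
 */
static_assert(MAX_IPI <= 8, "SGI8-15 may be reserved by secure firmware");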
+static int ipi_irq_base __read_mostly;
+static int nr_ipi __read_mostly = NR_IPI;
+static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly;
+
+static void ipi_setup(int cpu);
+
static DECLARE_COMPLETION(cpu_running);
-static struct smp_operations smp_ops;
+static struct smp_operations smp_ops __ro_after_init;
-void __init smp_set_ops(struct smp_operations *ops)
+void __init smp_set_ops(const struct smp_operations *ops)
{
if (ops)
smp_ops = *ops;
@@ -81,35 +97,68 @@ void __init smp_set_ops(struct smp_operations *ops)
static unsigned long get_arch_pgd(pgd_t *pgd)
{
- phys_addr_t pgdir = virt_to_phys(pgd);
- BUG_ON(pgdir & ARCH_PGD_MASK);
- return pgdir >> ARCH_PGD_SHIFT;
+#ifdef CONFIG_ARM_LPAE
+ return __phys_to_pfn(virt_to_phys(pgd));
+#else
+ return virt_to_phys(pgd);
+#endif
+}
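/*
 * Illustrative inverse of get_arch_pgd(), not part of this patch: with
 * LPAE the page table may live above 4GB, so only its PFN fits in an
 * unsigned long and the consumer must widen it again (the helper name
 * below is hypothetical):
 */
static inline phys_addr_t arch_pgd_to_phys(unsigned long v)
{
#ifdef CONFIG_ARM_LPAE
	return __pfn_to_phys((phys_addr_t)v);
#else
	return (phys_addr_t)v;
#endif
}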
+
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+ if (!cpu_vtable[cpu])
+ cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
+
+ return cpu_vtable[cpu] ? 0 : -ENOMEM;
}
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+static void secondary_biglittle_init(void)
+{
+ init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
+}
+#else
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+ return 0;
+}
+
+static void secondary_biglittle_init(void)
+{
+}
+#endif
+
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
int ret;
+ if (!smp_ops.smp_boot_secondary)
+ return -ENOSYS;
+
+ ret = secondary_biglittle_prepare(cpu);
+ if (ret)
+ return ret;
+
/*
* We need to tell the secondary core where to find
* its stack and the page tables.
*/
secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
- secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr;
+ secondary_data.mpu_rgn_info = &mpu_rgn_info;
#endif
#ifdef CONFIG_MMU
- secondary_data.pgdir = get_arch_pgd(idmap_pgd);
+ secondary_data.pgdir = virt_to_phys(idmap_pgd);
secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
- __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
- outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
+ secondary_data.task = idle;
+ sync_cache_w(&secondary_data);
/*
* Now bring the CPU into our world.
*/
- ret = boot_secondary(cpu, idle);
+ ret = smp_ops.smp_boot_secondary(cpu, idle);
if (ret == 0) {
/*
* CPU was successfully started, wait for it
@@ -138,16 +187,22 @@ void __init smp_init_cpus(void)
smp_ops.smp_init_cpus();
}
-int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+int platform_can_secondary_boot(void)
{
- if (smp_ops.smp_boot_secondary)
- return smp_ops.smp_boot_secondary(cpu, idle);
- return -ENOSYS;
+ return !!smp_ops.smp_boot_secondary;
}
+int platform_can_cpu_hotplug(void)
+{
#ifdef CONFIG_HOTPLUG_CPU
-static void percpu_timer_stop(void);
+ if (smp_ops.cpu_kill)
+ return 1;
+#endif
+ return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
static int platform_cpu_kill(unsigned int cpu)
{
if (smp_ops.cpu_kill)
@@ -160,17 +215,41 @@ static int platform_cpu_disable(unsigned int cpu)
if (smp_ops.cpu_disable)
return smp_ops.cpu_disable(cpu);
+ return 0;
+}
+
+int platform_can_hotplug_cpu(unsigned int cpu)
+{
+ /* cpu_die must be specified to support hotplug */
+ if (!smp_ops.cpu_die)
+ return 0;
+
+ if (smp_ops.cpu_can_disable)
+ return smp_ops.cpu_can_disable(cpu);
+
/*
* By default, allow disabling all CPUs except the first one,
* since this is special on a lot of platforms, e.g. because
* of clock tick interrupts.
*/
- return cpu == 0 ? -EPERM : 0;
+ return cpu != 0;
}
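/*
 * Illustrative sketch, not part of this patch: a platform opts in to CPU
 * hotplug by supplying cpu_die (and optionally cpu_kill/cpu_can_disable)
 * in its smp_operations; all "foo" names below are hypothetical, and
 * CPU_METHOD_OF_DECLARE() is just one of the registration paths.
 */
static const struct smp_operations foo_smp_ops __initconst = {
	.smp_boot_secondary	= foo_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= foo_cpu_die,
	.cpu_kill		= foo_cpu_kill,
#endif
};
CPU_METHOD_OF_DECLARE(foo_smp, "vendor,foo-smp", &foo_smp_ops);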
+
+static void ipi_teardown(int cpu)
+{
+ int i;
+
+ if (WARN_ON_ONCE(!ipi_irq_base))
+ return;
+
+ for (i = 0; i < nr_ipi; i++)
+ disable_percpu_irq(ipi_irq_base + i);
+}
+
/*
* __cpu_disable runs on the processor to be shutdown.
*/
-int __cpuinit __cpu_disable(void)
+int __cpu_disable(void)
{
unsigned int cpu = smp_processor_id();
int ret;
@@ -179,21 +258,21 @@ int __cpuinit __cpu_disable(void)
if (ret)
return ret;
+#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
+ remove_cpu_topology(cpu);
+#endif
+
/*
* Take this CPU offline. Once we clear this, we can't return,
* and we must not schedule until we're ready to give up the cpu.
*/
set_cpu_online(cpu, false);
+ ipi_teardown(cpu);
/*
* OK - migrate IRQs away from this CPU
*/
- migrate_irqs();
-
- /*
- * Stop the local timer for this CPU.
- */
- percpu_timer_stop();
+ irq_migrate_all_off_this_cpu();
/*
* Flush user cache and TLB mappings, and then remove this CPU
@@ -205,25 +284,18 @@ int __cpuinit __cpu_disable(void)
flush_cache_louis();
local_flush_tlb_all();
- clear_tasks_mm_cpumask(cpu);
-
return 0;
}
-static DECLARE_COMPLETION(cpu_died);
-
/*
- * called on the thread which is asking for a CPU to be shutdown -
- * waits until shutdown has completed, or it is timed out.
+ * called on the thread which is asking for a CPU to be shut down, after
+ * the shutdown has completed.
*/
-void __cpuinit __cpu_die(unsigned int cpu)
+void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
{
- if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
- pr_err("CPU%u: cpu didn't die\n", cpu);
- return;
- }
- printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
+ pr_debug("CPU%u: shutdown\n", cpu);
+ clear_tasks_mm_cpumask(cpu);
/*
* platform_cpu_kill() is generally expected to do the powering off
* and/or cutting of clocks to the dying CPU. Optionally, this may
@@ -232,7 +304,7 @@ void __cpuinit __cpu_die(unsigned int cpu)
* the requesting CPU and the dying CPU actually losing power.
*/
if (!platform_cpu_kill(cpu))
- printk("CPU%u: unable to kill\n", cpu);
+ pr_err("CPU%u: unable to kill\n", cpu);
}
/*
@@ -243,7 +315,7 @@ void __cpuinit __cpu_die(unsigned int cpu)
* of the other hotplug-cpu capable cores, so presumably coming
* out of idle fixes this.
*/
-void __ref cpu_die(void)
+void __noreturn arch_cpu_idle_dead(void)
{
unsigned int cpu = smp_processor_id();
@@ -260,11 +332,11 @@ void __ref cpu_die(void)
flush_cache_louis();
/*
- * Tell __cpu_die() that this CPU is now safe to dispose of. Once
- * this returns, power and/or clocks can be removed at any point
- * from this CPU and its cache by platform_cpu_kill().
+ * Tell cpuhp_bp_sync_dead() that this CPU is now safe to dispose
+ * of. Once this returns, power and/or clocks can be removed at
+ * any point from this CPU and its cache by platform_cpu_kill().
*/
- complete(&cpu_died);
+ cpuhp_ap_report_dead();
/*
* Ensure that the cache lines associated with that completion are
@@ -289,6 +361,9 @@ void __ref cpu_die(void)
if (smp_ops.cpu_die)
smp_ops.cpu_die(cpu);
+ pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
+ cpu);
+
/*
* Do not return to the idle loop - jump back to the secondary
* cpu initialisation. There's some initialisation which needs
@@ -296,9 +371,14 @@ void __ref cpu_die(void)
*/
__asm__("mov sp, %0\n"
" mov fp, #0\n"
+ " mov r0, %1\n"
" b secondary_start_kernel"
:
- : "r" (task_stack_page(current) + THREAD_SIZE - 8));
+ : "r" (task_stack_page(current) + THREAD_SIZE - 8),
+ "r" (current)
+ : "r0");
+
+ unreachable();
}
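/*
 * Illustrative note, not part of this patch: the asm above hand-crafts a
 * call that must match the C entry point's AAPCS signature, i.e. the idle
 * task pointer travels in r0 as the first argument:
 *
 *	asmlinkage void secondary_start_kernel(struct task_struct *task);
 */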
#endif /* CONFIG_HOTPLUG_CPU */
@@ -306,7 +386,7 @@ void __ref cpu_die(void)
* Called by both boot and secondaries to move global data into
* per-processor storage.
*/
-static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+static void smp_store_cpu_info(unsigned int cpuid)
{
struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
@@ -314,19 +394,28 @@ static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
cpu_info->cpuid = read_cpuid_id();
store_cpu_topology(cpuid);
+ check_cpu_icache_size(cpuid);
}
-static void percpu_timer_setup(void);
+static void set_current(struct task_struct *cur)
+{
+ /* Set TPIDRURO */
+ asm("mcr p15, 0, %0, c13, c0, 3" :: "r"(cur) : "memory");
+}
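/*
 * Illustrative counterpart, not part of this patch: current can be read
 * back from the same TPIDRURO register (hypothetical helper):
 */
static inline struct task_struct *get_current_tpidruro(void)
{
	struct task_struct *cur;

	/* Read TPIDRURO */
	asm("mrc p15, 0, %0, c13, c0, 3" : "=r"(cur));
	return cur;
}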
/*
* This is the secondary CPU boot entry. We're using this CPUs
* idle thread stack, but a set of temporary page tables.
*/
-asmlinkage void __cpuinit secondary_start_kernel(void)
+asmlinkage void secondary_start_kernel(struct task_struct *task)
{
struct mm_struct *mm = &init_mm;
unsigned int cpu;
+ set_current(task);
+
+ secondary_biglittle_init();
+
/*
* The identity mapping is uncached (strongly ordered), so
* switch away from it before attempting any exclusive accesses.
@@ -341,15 +430,17 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
* reference and switch to it.
*/
cpu = smp_processor_id();
- atomic_inc(&mm->mm_count);
+ mmgrab(mm);
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));
cpu_init();
- printk("CPU%u: Booted secondary processor\n", cpu);
+#ifndef CONFIG_MMU
+ setup_vectors_base();
+#endif
+ pr_debug("CPU%u: Booted secondary processor\n", cpu);
- preempt_disable();
trace_hardirqs_off();
/*
@@ -360,6 +451,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
notify_cpu_starting(cpu);
+ ipi_setup(cpu);
+
calibrate_delay();
smp_store_cpu_info(cpu);
@@ -370,20 +463,19 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
* before we continue - which happens after __cpu_up returns.
*/
set_cpu_online(cpu, true);
- complete(&cpu_running);
- /*
- * Setup the percpu timer for this CPU.
- */
- percpu_timer_setup();
+ check_other_bugs();
+
+ complete(&cpu_running);
local_irq_enable();
local_fiq_enable();
+ local_abt_enable();
/*
* OK, it's off to the idle thread for us
*/
- cpu_startup_entry(CPUHP_ONLINE);
+ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
void __init smp_cpus_done(unsigned int max_cpus)
@@ -423,12 +515,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
max_cpus = ncores;
if (ncores > 1 && max_cpus) {
/*
- * Enable the local timer or broadcast device for the
- * boot CPU, but only if we have more than one CPU.
- */
- percpu_timer_setup();
-
- /*
* Initialise the present map, which describes the set of CPUs
* actually populated at the present time. A platform should
* re-initialize the map in the platforms smp_prepare_cpus()
@@ -445,135 +531,63 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}
}
-static void (*smp_cross_call)(const struct cpumask *, unsigned int);
-
-void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
-{
- if (!smp_cross_call)
- smp_cross_call = fn;
-}
-
-void arch_send_call_function_ipi_mask(const struct cpumask *mask)
-{
- smp_cross_call(mask, IPI_CALL_FUNC);
-}
-
-void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
-{
- smp_cross_call(mask, IPI_WAKEUP);
-}
-
-void arch_send_call_function_single_ipi(int cpu)
-{
- smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
-}
-
-static const char *ipi_types[NR_IPI] = {
-#define S(x,s) [x] = s
- S(IPI_WAKEUP, "CPU wakeup interrupts"),
- S(IPI_TIMER, "Timer broadcast interrupts"),
- S(IPI_RESCHEDULE, "Rescheduling interrupts"),
- S(IPI_CALL_FUNC, "Function call interrupts"),
- S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
- S(IPI_CPU_STOP, "CPU stop interrupts"),
+static const char *ipi_types[NR_IPI] __tracepoint_string = {
+ [IPI_WAKEUP] = "CPU wakeup interrupts",
+ [IPI_TIMER] = "Timer broadcast interrupts",
+ [IPI_RESCHEDULE] = "Rescheduling interrupts",
+ [IPI_CALL_FUNC] = "Function call interrupts",
+ [IPI_CPU_STOP] = "CPU stop interrupts",
+ [IPI_IRQ_WORK] = "IRQ work interrupts",
+ [IPI_COMPLETION] = "completion interrupts",
};
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
+
void show_ipi_list(struct seq_file *p, int prec)
{
unsigned int cpu, i;
for (i = 0; i < NR_IPI; i++) {
- seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
+ if (!ipi_desc[i])
+ continue;
+
+ seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
+ prec >= 4 ? " " : "");
for_each_online_cpu(cpu)
- seq_printf(p, "%10u ",
- __get_irq_stat(cpu, ipi_irqs[i]));
+ seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
seq_printf(p, " %s\n", ipi_types[i]);
}
}
-u64 smp_irq_stat_cpu(unsigned int cpu)
-{
- u64 sum = 0;
- int i;
-
- for (i = 0; i < NR_IPI; i++)
- sum += __get_irq_stat(cpu, ipi_irqs[i]);
-
- return sum;
-}
-
-/*
- * Timer (local or broadcast) support
- */
-static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
-
-#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-void tick_broadcast(const struct cpumask *mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
- smp_cross_call(mask, IPI_TIMER);
+ smp_cross_call(mask, IPI_CALL_FUNC);
}
-#endif
-static void broadcast_timer_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
+ smp_cross_call(mask, IPI_WAKEUP);
}
-static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
+void arch_send_call_function_single_ipi(int cpu)
{
- evt->name = "dummy_timer";
- evt->features = CLOCK_EVT_FEAT_ONESHOT |
- CLOCK_EVT_FEAT_PERIODIC |
- CLOCK_EVT_FEAT_DUMMY;
- evt->rating = 100;
- evt->mult = 1;
- evt->set_mode = broadcast_timer_set_mode;
-
- clockevents_register_device(evt);
+ smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}
-static struct local_timer_ops *lt_ops;
-
-#ifdef CONFIG_LOCAL_TIMERS
-int local_timer_register(struct local_timer_ops *ops)
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
{
- if (!is_smp() || !setup_max_cpus)
- return -ENXIO;
-
- if (lt_ops)
- return -EBUSY;
-
- lt_ops = ops;
- return 0;
+ if (arch_irq_work_has_interrupt())
+ smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif
-static void __cpuinit percpu_timer_setup(void)
-{
- unsigned int cpu = smp_processor_id();
- struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
-
- evt->cpumask = cpumask_of(cpu);
-
- if (!lt_ops || lt_ops->setup(evt))
- broadcast_timer_setup(evt);
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * The generic clock events code purposely does not stop the local timer
- * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
- * manually here.
- */
-static void percpu_timer_stop(void)
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+void tick_broadcast(const struct cpumask *mask)
{
- unsigned int cpu = smp_processor_id();
- struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
-
- if (lt_ops)
- lt_ops->stop(evt);
+ smp_cross_call(mask, IPI_TIMER);
}
#endif
@@ -584,38 +598,45 @@ static DEFINE_RAW_SPINLOCK(stop_lock);
*/
static void ipi_cpu_stop(unsigned int cpu)
{
- if (system_state == SYSTEM_BOOTING ||
- system_state == SYSTEM_RUNNING) {
+ local_fiq_disable();
+
+ if (system_state <= SYSTEM_RUNNING) {
raw_spin_lock(&stop_lock);
- printk(KERN_CRIT "CPU%u: stopping\n", cpu);
+ pr_crit("CPU%u: stopping\n", cpu);
dump_stack();
raw_spin_unlock(&stop_lock);
}
set_cpu_online(cpu, false);
- local_fiq_disable();
- local_irq_disable();
-
- while (1)
+ while (1) {
cpu_relax();
+ wfe();
+ }
}
-/*
- * Main handler for inter-processor interrupts
- */
-asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
+static DEFINE_PER_CPU(struct completion *, cpu_completion);
+
+int register_ipi_completion(struct completion *completion, int cpu)
{
- handle_IPI(ipinr, regs);
+ per_cpu(cpu_completion, cpu) = completion;
+ return IPI_COMPLETION;
}
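/*
 * Illustrative usage, not part of this patch (hypothetical helper):
 * round-trip an IPI_COMPLETION to @cpu and block until its handler has
 * run there; ipi_complete() on the target completes the registered
 * completion.
 */
static void ipi_round_trip(int cpu)
{
	DECLARE_COMPLETION_ONSTACK(done);

	smp_cross_call(cpumask_of(cpu), register_ipi_completion(&done, cpu));
	wait_for_completion(&done);
}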
-void handle_IPI(int ipinr, struct pt_regs *regs)
+static void ipi_complete(unsigned int cpu)
+{
+ complete(per_cpu(cpu_completion, cpu));
+}
+
+/*
+ * Main handler for inter-processor interrupts
+ */
+static void do_handle_IPI(int ipinr)
{
unsigned int cpu = smp_processor_id();
- struct pt_regs *old_regs = set_irq_regs(regs);
- if (ipinr < NR_IPI)
- __inc_irq_stat(cpu, ipi_irqs[ipinr]);
+ if ((unsigned)ipinr < NR_IPI)
+ trace_ipi_entry(ipi_types[ipinr]);
switch (ipinr) {
case IPI_WAKEUP:
@@ -623,9 +644,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
case IPI_TIMER:
- irq_enter();
tick_receive_broadcast();
- irq_exit();
break;
#endif
@@ -634,32 +653,99 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
break;
case IPI_CALL_FUNC:
- irq_enter();
generic_smp_call_function_interrupt();
- irq_exit();
- break;
-
- case IPI_CALL_FUNC_SINGLE:
- irq_enter();
- generic_smp_call_function_single_interrupt();
- irq_exit();
break;
case IPI_CPU_STOP:
- irq_enter();
ipi_cpu_stop(cpu);
- irq_exit();
+ break;
+
+#ifdef CONFIG_IRQ_WORK
+ case IPI_IRQ_WORK:
+ irq_work_run();
+ break;
+#endif
+
+ case IPI_COMPLETION:
+ ipi_complete(cpu);
+ break;
+
+ case IPI_CPU_BACKTRACE:
+ printk_deferred_enter();
+ nmi_cpu_backtrace(get_irq_regs());
+ printk_deferred_exit();
break;
default:
- printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
- cpu, ipinr);
+ pr_crit("CPU%u: Unknown IPI message 0x%x\n",
+ cpu, ipinr);
break;
}
+
+ if ((unsigned)ipinr < NR_IPI)
+ trace_ipi_exit(ipi_types[ipinr]);
+}
+
+/* Legacy version, should go away once all irqchips have been converted */
+void handle_IPI(int ipinr, struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
+ do_handle_IPI(ipinr);
+ irq_exit();
+
set_irq_regs(old_regs);
}
-void smp_send_reschedule(int cpu)
+static irqreturn_t ipi_handler(int irq, void *data)
+{
+ do_handle_IPI(irq - ipi_irq_base);
+ return IRQ_HANDLED;
+}
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+ trace_ipi_raise(target, ipi_types[ipinr]);
+ __ipi_send_mask(ipi_desc[ipinr], target);
+}
+
+static void ipi_setup(int cpu)
+{
+ int i;
+
+ if (WARN_ON_ONCE(!ipi_irq_base))
+ return;
+
+ for (i = 0; i < nr_ipi; i++)
+ enable_percpu_irq(ipi_irq_base + i, 0);
+}
+
+void __init set_smp_ipi_range(int ipi_base, int n)
+{
+ int i;
+
+ WARN_ON(n < MAX_IPI);
+ nr_ipi = min(n, MAX_IPI);
+
+ for (i = 0; i < nr_ipi; i++) {
+ int err;
+
+ err = request_percpu_irq(ipi_base + i, ipi_handler,
+ "IPI", &irq_stat);
+ WARN_ON(err);
+
+ ipi_desc[i] = irq_to_desc(ipi_base + i);
+ irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
+ }
+
+ ipi_irq_base = ipi_base;
+
+ /* Setup the boot CPU immediately */
+ ipi_setup(smp_processor_id());
+}
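/*
 * Illustrative irqchip-side sketch, not part of this patch: a driver
 * allocates a contiguous block of per-CPU IRQs for its SGIs and hands
 * the range over (simplified from the GIC drivers; the "foo" name and
 * the NULL allocation argument are placeholders):
 */
static void foo_smp_init(struct irq_domain *ipi_domain)
{
	int base_sgi = irq_domain_alloc_irqs(ipi_domain, 8, NUMA_NO_NODE, NULL);

	if (WARN_ON(base_sgi <= 0))
		return;

	set_smp_ipi_range(base_sgi, 8);
}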
+
+void arch_smp_send_reschedule(int cpu)
{
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
@@ -680,15 +766,22 @@ void smp_send_stop(void)
udelay(1);
if (num_online_cpus() > 1)
- pr_warning("SMP: failed to stop secondary CPUs\n");
+ pr_warn("SMP: failed to stop secondary CPUs\n");
}
-/*
- * not supported here
+/* If panic() is called at the same time on CPU1 and CPU2, and CPU1 calls
+ * panic_smp_self_stop() before crash_smp_send_stop(), CPU1 cannot receive
+ * the IPI IRQs from CPU2; CPU1 then stays online forever and kdump fails.
+ * So split out panic_smp_self_stop() and add
+ * set_cpu_online(smp_processor_id(), false).
*/
-int setup_profiling_timer(unsigned int multiplier)
+void __noreturn panic_smp_self_stop(void)
{
- return -EINVAL;
+ pr_debug("CPU %u will stop doing anything useful since another CPU has paniced\n",
+ smp_processor_id());
+ set_cpu_online(smp_processor_id(), false);
+ while (1)
+ cpu_relax();
}
#ifdef CONFIG_CPU_FREQ
@@ -702,15 +795,20 @@ static int cpufreq_callback(struct notifier_block *nb,
unsigned long val, void *data)
{
struct cpufreq_freqs *freq = data;
- int cpu = freq->cpu;
+ struct cpumask *cpus = freq->policy->cpus;
+ int cpu, first = cpumask_first(cpus);
+ unsigned int lpj;
if (freq->flags & CPUFREQ_CONST_LOOPS)
return NOTIFY_OK;
- if (!per_cpu(l_p_j_ref, cpu)) {
- per_cpu(l_p_j_ref, cpu) =
- per_cpu(cpu_data, cpu).loops_per_jiffy;
- per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+ if (!per_cpu(l_p_j_ref, first)) {
+ for_each_cpu(cpu, cpus) {
+ per_cpu(l_p_j_ref, cpu) =
+ per_cpu(cpu_data, cpu).loops_per_jiffy;
+ per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+ }
+
if (!global_l_p_j_ref) {
global_l_p_j_ref = loops_per_jiffy;
global_l_p_j_ref_freq = freq->old;
@@ -718,15 +816,15 @@ static int cpufreq_callback(struct notifier_block *nb,
}
if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
- (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
- (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
+ (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
global_l_p_j_ref_freq,
freq->new);
- per_cpu(cpu_data, cpu).loops_per_jiffy =
- cpufreq_scale(per_cpu(l_p_j_ref, cpu),
- per_cpu(l_p_j_ref_freq, cpu),
- freq->new);
+
+ lpj = cpufreq_scale(per_cpu(l_p_j_ref, first),
+ per_cpu(l_p_j_ref_freq, first), freq->new);
+ for_each_cpu(cpu, cpus)
+ per_cpu(cpu_data, cpu).loops_per_jiffy = lpj;
}
return NOTIFY_OK;
}
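/*
 * Illustrative arithmetic, not part of this patch: cpufreq_scale(ref,
 * ref_freq, new_freq) rescales linearly, so a reference loops_per_jiffy
 * of 4000000 captured at 1000000 kHz becomes half that at 500000 kHz:
 *
 *	lpj = cpufreq_scale(4000000, 1000000, 500000);	/* == 2000000 */
 */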
@@ -743,3 +841,13 @@ static int __init register_cpufreq_notifier(void)
core_initcall(register_cpufreq_notifier);
#endif
+
+static void raise_nmi(cpumask_t *mask)
+{
+ __ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
+}
+
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
+{
+ nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_nmi);
+}
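/*
 * Illustrative usage, not part of this patch: generic helpers such as
 * trigger_all_cpu_backtrace() funnel into the hook above, raising
 * IPI_CPU_BACKTRACE on the other online CPUs and dumping their state
 * via nmi_cpu_backtrace().
 */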