Diffstat (limited to 'arch/arm64/kernel/smp.c')
-rw-r--r-- | arch/arm64/kernel/smp.c | 144
1 file changed, 93 insertions, 51 deletions
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 3b3f6b56e733..68cea3a4a35c 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -64,26 +64,18 @@ struct secondary_data secondary_data;
 /* Number of CPUs which aren't online, but looping in kernel text. */
 static int cpus_stuck_in_kernel;
 
-enum ipi_msg_type {
-	IPI_RESCHEDULE,
-	IPI_CALL_FUNC,
-	IPI_CPU_STOP,
-	IPI_CPU_STOP_NMI,
-	IPI_TIMER,
-	IPI_IRQ_WORK,
-	NR_IPI,
-	/*
-	 * Any enum >= NR_IPI and < MAX_IPI is special and not tracable
-	 * with trace_ipi_*
-	 */
-	IPI_CPU_BACKTRACE = NR_IPI,
-	IPI_KGDB_ROUNDUP,
-	MAX_IPI
-};
-
 static int ipi_irq_base __ro_after_init;
 static int nr_ipi __ro_after_init = NR_IPI;
-static struct irq_desc *ipi_desc[MAX_IPI] __ro_after_init;
+
+struct ipi_descs {
+	struct irq_desc *descs[MAX_IPI];
+};
+
+static DEFINE_PER_CPU_READ_MOSTLY(struct ipi_descs, pcpu_ipi_desc);
+
+#define get_ipi_desc(__cpu, __ipi)	(per_cpu_ptr(&pcpu_ipi_desc, __cpu)->descs[__ipi])
+
+static bool percpu_ipi_descs __ro_after_init;
 
 static bool crash_stop;
 
@@ -844,7 +836,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
 			   prec >= 4 ? " " : "");
 		for_each_online_cpu(cpu)
-			seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
+			seq_printf(p, "%10u ", irq_desc_kstat_cpu(get_ipi_desc(cpu, i), cpu));
 		seq_printf(p, " %s\n", ipi_types[i]);
 	}
 
@@ -917,9 +909,20 @@ static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs
 #endif
 }
 
+static void arm64_send_ipi(const cpumask_t *mask, unsigned int nr)
+{
+	unsigned int cpu;
+
+	if (!percpu_ipi_descs)
+		__ipi_send_mask(get_ipi_desc(0, nr), mask);
+	else
+		for_each_cpu(cpu, mask)
+			__ipi_send_single(get_ipi_desc(cpu, nr), cpu);
+}
+
 static void arm64_backtrace_ipi(cpumask_t *mask)
 {
-	__ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
+	arm64_send_ipi(mask, IPI_CPU_BACKTRACE);
 }
 
 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
@@ -944,7 +947,7 @@ void kgdb_roundup_cpus(void)
 		if (cpu == this_cpu)
 			continue;
 
-		__ipi_send_single(ipi_desc[IPI_KGDB_ROUNDUP], cpu);
+		__ipi_send_single(get_ipi_desc(cpu, IPI_KGDB_ROUNDUP), cpu);
 	}
 }
 #endif
@@ -1013,14 +1016,16 @@ static void do_handle_IPI(int ipinr)
 
 static irqreturn_t ipi_handler(int irq, void *data)
 {
-	do_handle_IPI(irq - ipi_irq_base);
+	unsigned int ipi = (irq - ipi_irq_base) % nr_ipi;
+
+	do_handle_IPI(ipi);
 	return IRQ_HANDLED;
 }
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
 {
 	trace_ipi_raise(target, ipi_types[ipinr]);
-	__ipi_send_mask(ipi_desc[ipinr], target);
+	arm64_send_ipi(target, ipinr);
 }
 
 static bool ipi_should_be_nmi(enum ipi_msg_type ipi)
@@ -1046,11 +1051,15 @@ static void ipi_setup(int cpu)
 		return;
 
 	for (i = 0; i < nr_ipi; i++) {
-		if (ipi_should_be_nmi(i)) {
-			prepare_percpu_nmi(ipi_irq_base + i);
-			enable_percpu_nmi(ipi_irq_base + i, 0);
+		if (!percpu_ipi_descs) {
+			if (ipi_should_be_nmi(i)) {
+				prepare_percpu_nmi(ipi_irq_base + i);
+				enable_percpu_nmi(ipi_irq_base + i, 0);
+			} else {
+				enable_percpu_irq(ipi_irq_base + i, 0);
+			}
 		} else {
-			enable_percpu_irq(ipi_irq_base + i, 0);
+			enable_irq(irq_desc_get_irq(get_ipi_desc(cpu, i)));
 		}
 	}
 }
@@ -1064,44 +1073,77 @@ static void ipi_teardown(int cpu)
 		return;
 
 	for (i = 0; i < nr_ipi; i++) {
-		if (ipi_should_be_nmi(i)) {
-			disable_percpu_nmi(ipi_irq_base + i);
-			teardown_percpu_nmi(ipi_irq_base + i);
+		if (!percpu_ipi_descs) {
+			if (ipi_should_be_nmi(i)) {
+				disable_percpu_nmi(ipi_irq_base + i);
+				teardown_percpu_nmi(ipi_irq_base + i);
+			} else {
+				disable_percpu_irq(ipi_irq_base + i);
+			}
 		} else {
-			disable_percpu_irq(ipi_irq_base + i);
+			disable_irq(irq_desc_get_irq(get_ipi_desc(cpu, i)));
 		}
 	}
 }
 #endif
 
-void __init set_smp_ipi_range(int ipi_base, int n)
+static void ipi_setup_sgi(int ipi)
 {
-	int i;
+	int err, irq, cpu;
 
-	WARN_ON(n < MAX_IPI);
-	nr_ipi = min(n, MAX_IPI);
+	irq = ipi_irq_base + ipi;
 
-	for (i = 0; i < nr_ipi; i++) {
-		int err;
+	if (ipi_should_be_nmi(ipi)) {
+		err = request_percpu_nmi(irq, ipi_handler, "IPI", &irq_stat);
+		WARN(err, "Could not request IRQ %d as NMI, err=%d\n", irq, err);
+	} else {
+		err = request_percpu_irq(irq, ipi_handler, "IPI", &irq_stat);
+		WARN(err, "Could not request IRQ %d as IRQ, err=%d\n", irq, err);
+	}
 
-		if (ipi_should_be_nmi(i)) {
-			err = request_percpu_nmi(ipi_base + i, ipi_handler,
-						 "IPI", &irq_stat);
-			WARN(err, "Could not request IPI %d as NMI, err=%d\n",
-			     i, err);
-		} else {
-			err = request_percpu_irq(ipi_base + i, ipi_handler,
-						 "IPI", &irq_stat);
-			WARN(err, "Could not request IPI %d as IRQ, err=%d\n",
-			     i, err);
-		}
+	for_each_possible_cpu(cpu)
+		get_ipi_desc(cpu, ipi) = irq_to_desc(irq);
+
+	irq_set_status_flags(irq, IRQ_HIDDEN);
+}
+
+static void ipi_setup_lpi(int ipi, int ncpus)
+{
+	for (int cpu = 0; cpu < ncpus; cpu++) {
+		int err, irq;
+
+		irq = ipi_irq_base + (cpu * nr_ipi) + ipi;
+
+		err = irq_force_affinity(irq, cpumask_of(cpu));
+		WARN(err, "Could not force affinity IRQ %d, err=%d\n", irq, err);
+
+		err = request_irq(irq, ipi_handler, IRQF_NO_AUTOEN, "IPI",
+				  NULL);
+		WARN(err, "Could not request IRQ %d, err=%d\n", irq, err);
+
+		irq_set_status_flags(irq, (IRQ_HIDDEN | IRQ_NO_BALANCING_MASK));
 
-		ipi_desc[i] = irq_to_desc(ipi_base + i);
-		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
+		get_ipi_desc(cpu, ipi) = irq_to_desc(irq);
 	}
+}
+
+void __init set_smp_ipi_range_percpu(int ipi_base, int n, int ncpus)
+{
+	int i;
+
+	WARN_ON(n < MAX_IPI);
+	nr_ipi = min(n, MAX_IPI);
+
+	percpu_ipi_descs = !!ncpus;
 	ipi_irq_base = ipi_base;
 
+	for (i = 0; i < nr_ipi; i++) {
+		if (!percpu_ipi_descs)
+			ipi_setup_sgi(i);
+		else
+			ipi_setup_lpi(i, ncpus);
+	}
+
 	/* Setup the boot CPU immediately */
 	ipi_setup(smp_processor_id());
 }
@@ -1143,7 +1185,7 @@ static inline unsigned int num_other_online_cpus(void)
 void smp_send_stop(void)
 {
 	static unsigned long stop_in_progress;
-	cpumask_t mask;
+	static cpumask_t mask;
 	unsigned long timeout;
 
 	/*
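The heart of the change is visible in the hunks above: the single global ipi_desc[] array becomes a per-CPU table (pcpu_ipi_desc, accessed through get_ipi_desc()), so an IPI can be backed either by an SGI shared by all CPUs or, when ncpus is non-zero, by one LPI per CPU per IPI numbered ipi_irq_base + cpu * nr_ipi + ipi, with arm64_send_ipi() choosing between a single mask send and per-CPU single sends. The stand-alone user-space C sketch below only illustrates that indexing and dispatch shape; the plain array, the made-up IRQ numbers, and the printf() calls are illustrative stand-ins, not the kernel's per-CPU data or the __ipi_send_*() API.

/*
 * User-space sketch (not kernel code) of the lookup and send logic added
 * by the patch: SGI-backed IPIs share one descriptor across all CPUs and
 * are sent with a single mask operation, while LPI-backed IPIs get one
 * interrupt per CPU per IPI and are sent one CPU at a time.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_CPUS 4
#define NR_IPI   6

static bool percpu_ipi_descs;          /* false: SGI-backed, true: LPI-backed  */
static int ipi_irq_base = 64;          /* illustrative base, not a real IRQ    */
static int ipi_irq[MAX_CPUS][NR_IPI];  /* stands in for get_ipi_desc(cpu, ipi) */

static void setup_ipis(bool use_lpis)
{
	percpu_ipi_descs = use_lpis;

	for (int ipi = 0; ipi < NR_IPI; ipi++)
		for (int cpu = 0; cpu < MAX_CPUS; cpu++)
			ipi_irq[cpu][ipi] = use_lpis ?
				ipi_irq_base + cpu * NR_IPI + ipi : /* one LPI per CPU per IPI */
				ipi_irq_base + ipi;                 /* one SGI shared by all CPUs */
}

static void send_ipi(unsigned long mask, int ipi)
{
	if (!percpu_ipi_descs) {
		/* SGI case: a single send covers every CPU in the mask */
		printf("send irq %d to mask 0x%lx\n", ipi_irq[0][ipi], mask);
		return;
	}

	/* LPI case: walk the mask and signal each CPU's own interrupt */
	for (int cpu = 0; cpu < MAX_CPUS; cpu++)
		if (mask & (1UL << cpu))
			printf("send irq %d to cpu %d\n", ipi_irq[cpu][ipi], cpu);
}

int main(void)
{
	setup_ipis(true);                     /* pretend IPIs are LPI-backed */
	send_ipi((1UL << 1) | (1UL << 3), 2); /* IPI 2 to CPUs 1 and 3 */
	return 0;
}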