Diffstat (limited to 'arch/mips/kernel/smp-cps.c')
-rw-r--r--  arch/mips/kernel/smp-cps.c  704
1 file changed, 537 insertions(+), 167 deletions(-)
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index f832e99ad4c3..22d4f9ff3ae2 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -1,90 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2013 Imagination Technologies
- * Author: Paul Burton <paul.burton@imgtec.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
+ * Author: Paul Burton <paul.burton@mips.com>
*/
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/io.h>
-#include <linux/irqchip/mips-gic.h>
+#include <linux/memblock.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/hotplug.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
+#include <linux/irq.h>
#include <asm/bcache.h>
-#include <asm/mips-cm.h>
-#include <asm/mips-cpc.h>
+#include <asm/mips-cps.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
#include <asm/r4kcache.h>
+#include <asm/regdef.h>
+#include <asm/smp.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>
-static bool threads_disabled;
-static DECLARE_BITMAP(core_power, NR_CPUS);
+#define BEV_VEC_SIZE 0x500
+#define BEV_VEC_ALIGN 0x1000
-struct core_boot_config *mips_cps_core_bootcfg;
+enum label_id {
+ label_not_nmi = 1,
+};
+
+UASM_L_LA(_not_nmi)
+
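+/* Reset/BEV base register encoding for the boot vectors & their physical address */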
+static u64 core_entry_reg;
+static phys_addr_t cps_vec_pa;
-static int __init setup_nothreads(char *s)
+struct cluster_boot_config *mips_cps_cluster_bootcfg;
+
+static void power_up_other_cluster(unsigned int cluster)
{
- threads_disabled = true;
- return 0;
+ u32 stat, seq_state;
+ unsigned int timeout;
+
+ mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0,
+ CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+ stat = read_cpc_co_stat_conf();
+ mips_cm_unlock_other();
+
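+ /*
+ * Extract the power sequencer state; if the cluster is already in
+ * state U5 (powered up) there is nothing more to do.
+ */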
+ seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
+ seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
+ if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U5)
+ return;
+
+ /* Set endianness & power up the CM */
+ mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+ write_cpc_redir_sys_config(IS_ENABLED(CONFIG_CPU_BIG_ENDIAN));
+ write_cpc_redir_pwrup_ctl(1);
+ mips_cm_unlock_other();
+
+ /* Wait for the CM to start up */
+ timeout = 1000;
+ mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0,
+ CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+ while (1) {
+ stat = read_cpc_co_stat_conf();
+ seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
+ seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
+ if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U5)
+ break;
+
+ if (timeout) {
+ mdelay(1);
+ timeout--;
+ } else {
+ pr_warn("Waiting for cluster %u CM to power up... STAT_CONF=0x%x\n",
+ cluster, stat);
+ mdelay(1000);
+ }
+ }
+
+ mips_cm_unlock_other();
}
-early_param("nothreads", setup_nothreads);
-static unsigned core_vpe_count(unsigned core)
+static unsigned __init core_vpe_count(unsigned int cluster, unsigned core)
{
- unsigned cfg;
+ return min(smp_max_threads, mips_cps_numvps(cluster, core));
+}
+
+static void __init *mips_cps_build_core_entry(void *addr)
+{
+ extern void (*nmi_handler)(void);
+ u32 *p = addr;
+ u32 val;
+ struct uasm_label labels[2];
+ struct uasm_reloc relocs[2];
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ uasm_i_mfc0(&p, GPR_K0, C0_STATUS);
+ UASM_i_LA(&p, GPR_T9, ST0_NMI);
+ uasm_i_and(&p, GPR_K0, GPR_K0, GPR_T9);
+
+ uasm_il_bnez(&p, &r, GPR_K0, label_not_nmi);
+ uasm_i_nop(&p);
+ UASM_i_LA(&p, GPR_K0, (long)&nmi_handler);
+
+ uasm_l_not_nmi(&l, p);
+
+ val = CAUSEF_IV;
+ uasm_i_lui(&p, GPR_K0, val >> 16);
+ uasm_i_ori(&p, GPR_K0, GPR_K0, val & 0xffff);
+ uasm_i_mtc0(&p, GPR_K0, C0_CAUSE);
+ val = ST0_CU1 | ST0_CU0 | ST0_BEV | ST0_KX_IF_64;
+ uasm_i_lui(&p, GPR_K0, val >> 16);
+ uasm_i_ori(&p, GPR_K0, GPR_K0, val & 0xffff);
+ uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
+ uasm_i_ehb(&p);
+ uasm_i_ori(&p, GPR_A0, 0, read_c0_config() & CONF_CM_CMASK);
+ UASM_i_LA(&p, GPR_A1, (long)mips_gcr_base);
+#if defined(KBUILD_64BIT_SYM32) || defined(CONFIG_32BIT)
+ UASM_i_LA(&p, GPR_T9, CKSEG1ADDR(__pa_symbol(mips_cps_core_boot)));
+#else
+ UASM_i_LA(&p, GPR_T9, TO_UNCAC(__pa_symbol(mips_cps_core_boot)));
+#endif
+ uasm_i_jr(&p, GPR_T9);
+ uasm_i_nop(&p);
- if (threads_disabled)
- return 1;
+ uasm_resolve_relocs(relocs, labels);
- if ((!IS_ENABLED(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
- && (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp))
- return 1;
+ return p;
+}
+
+static bool __init check_64bit_reset(void)
+{
+ bool cx_64bit_reset = false;
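+
+ /* Probe whether the reset base register retains a full-width 64-bit BEVEXCBASE field */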
- mips_cm_lock_other(core, 0);
- cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
+ mips_cm_lock_other(0, 0, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+ write_gcr_co_reset64_base(CM_GCR_Cx_RESET64_BASE_BEVEXCBASE);
+ if ((read_gcr_co_reset64_base() & CM_GCR_Cx_RESET64_BASE_BEVEXCBASE) ==
+ CM_GCR_Cx_RESET64_BASE_BEVEXCBASE)
+ cx_64bit_reset = true;
mips_cm_unlock_other();
- return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
+
+ return cx_64bit_reset;
+}
+
+static int __init allocate_cps_vecs(void)
+{
+ /* Try to allocate in KSEG1 first */
+ cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN,
+ 0x0, CSEGX_SIZE - 1);
+
+ if (cps_vec_pa)
+ core_entry_reg = CKSEG1ADDR(cps_vec_pa) &
+ CM_GCR_Cx_RESET_BASE_BEVEXCBASE;
+
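+ /*
+ * KSEG1 allocation failed; with a 64-bit CM, fall back to any address
+ * the core's reset base register can reach.
+ */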
+ if (!cps_vec_pa && mips_cm_is64) {
+ phys_addr_t end;
+
+ if (check_64bit_reset()) {
+ pr_info("VP Local Reset Exception Base support 47 bits address\n");
+ end = MEMBLOCK_ALLOC_ANYWHERE;
+ } else {
+ end = SZ_4G - 1;
+ }
+ cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN, 0, end);
+ if (cps_vec_pa) {
+ if (check_64bit_reset())
+ core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET64_BASE_BEVEXCBASE) |
+ CM_GCR_Cx_RESET_BASE_MODE;
+ else
+ core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET_BASE_BEVEXCBASE) |
+ CM_GCR_Cx_RESET_BASE_MODE;
+ }
+ }
+
+ if (!cps_vec_pa)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void __init setup_cps_vecs(void)
+{
+ void *cps_vec;
+
+ cps_vec = (void *)CKSEG1ADDR_OR_64BIT(cps_vec_pa);
+ mips_cps_build_core_entry(cps_vec);
+
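+ /* Install the exception vectors at their architectural BEV-mode offsets */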
+ memcpy(cps_vec + 0x200, &excep_tlbfill, 0x80);
+ memcpy(cps_vec + 0x280, &excep_xtlbfill, 0x80);
+ memcpy(cps_vec + 0x300, &excep_cache, 0x80);
+ memcpy(cps_vec + 0x380, &excep_genex, 0x80);
+ memcpy(cps_vec + 0x400, &excep_intex, 0x80);
+ memcpy(cps_vec + 0x480, &excep_ejtag, 0x80);
+
+ /* Make sure the caches hold no stale data for the vector area */
+ blast_inv_dcache_range(CKSEG0ADDR_OR_64BIT(cps_vec_pa), CKSEG0ADDR_OR_64BIT(cps_vec_pa) + BEV_VEC_SIZE);
+ bc_inv(CKSEG0ADDR_OR_64BIT(cps_vec_pa), BEV_VEC_SIZE);
+ __sync();
}
static void __init cps_smp_setup(void)
{
- unsigned int ncores, nvpes, core_vpes;
- unsigned long core_entry;
- int c, v;
+ unsigned int nclusters, ncores, nvpes, core_vpes;
+ int cl, c, v;
/* Detect & record VPE topology */
- ncores = mips_cm_numcores();
+ nvpes = 0;
+ nclusters = mips_cps_numclusters();
pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
- for (c = nvpes = 0; c < ncores; c++) {
- core_vpes = core_vpe_count(c);
- pr_cont("%c%u", c ? ',' : '{', core_vpes);
-
- /* Use the number of VPEs in core 0 for smp_num_siblings */
- if (!c)
- smp_num_siblings = core_vpes;
-
- for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
- cpu_data[nvpes + v].core = c;
-#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
- cpu_data[nvpes + v].vpe_id = v;
-#endif
+ for (cl = 0; cl < nclusters; cl++) {
+ if (cl > 0)
+ pr_cont(",");
+ pr_cont("{");
+
+ if (mips_cm_revision() >= CM_REV_CM3_5)
+ power_up_other_cluster(cl);
+
+ ncores = mips_cps_numcores(cl);
+ for (c = 0; c < ncores; c++) {
+ core_vpes = core_vpe_count(cl, c);
+
+ if (c > 0)
+ pr_cont(",");
+ pr_cont("%u", core_vpes);
+
+ /* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */
+ if (!cl && !c)
+ smp_num_siblings = core_vpes;
+ cpumask_set_cpu(nvpes, &__cpu_primary_thread_mask);
+
+ for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
+ cpu_set_cluster(&cpu_data[nvpes + v], cl);
+ cpu_set_core(&cpu_data[nvpes + v], c);
+ cpu_set_vpe_id(&cpu_data[nvpes + v], v);
+ }
+
+ nvpes += core_vpes;
}
- nvpes += core_vpes;
+ pr_cont("}");
}
- pr_cont("} total %u\n", nvpes);
+ pr_cont(" total %u\n", nvpes);
/* Indicate present CPUs (CPU being synonymous with VPE) */
for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
@@ -97,19 +262,17 @@ static void __init cps_smp_setup(void)
/* Set a coherent default CCA (CWB) */
change_c0_config(CONF_CM_CMASK, 0x5);
- /* Core 0 is powered up (we're running on it) */
- bitmap_set(core_power, 0, 1);
-
/* Initialise core 0 */
mips_cps_core_init();
/* Make core 0 coherent with everything */
write_gcr_cl_coherence(0xff);
- if (mips_cm_revision() >= CM_REV_CM3) {
- core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
- write_gcr_bev_base(core_entry);
- }
+ if (allocate_cps_vecs())
+ pr_err("Failed to allocate CPS vectors\n");
+
+ if (core_entry_reg && mips_cm_revision() >= CM_REV_CM3)
+ write_gcr_bev_base(core_entry_reg);
#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
@@ -118,14 +281,31 @@ static void __init cps_smp_setup(void)
#endif /* CONFIG_MIPS_MT_FPAFF */
}
+unsigned long calibrate_delay_is_known(void)
+{
+ int first_cpu_cluster = 0;
+
+ /* The calibration has to be done on the primary CPU of the cluster */
+ if (mips_cps_first_online_in_cluster(&first_cpu_cluster))
+ return 0;
+
+ return cpu_data[first_cpu_cluster].udelay_val;
+}
+
static void __init cps_prepare_cpus(unsigned int max_cpus)
{
- unsigned ncores, core_vpes, c, cca;
- bool cca_unsuitable;
- u32 *entry_code;
+ unsigned int nclusters, ncores, core_vpes, nvpe = 0, c, cl, cca;
+ bool cca_unsuitable, cores_limited;
+ struct cluster_boot_config *cluster_bootcfg;
+ struct core_boot_config *core_bootcfg;
mips_mt_set_cpuoptions();
+ if (!core_entry_reg) {
+ pr_err("core_entry address unsuitable, disabling smp-cps\n");
+ goto err_out;
+ }
+
/* Detect whether the CCA is unsuited to multi-core SMP */
cca = read_c0_config() & CONF_CM_CMASK;
switch (cca) {
@@ -141,65 +321,88 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
}
/* Warn the user if the CCA prevents multi-core */
- ncores = mips_cm_numcores();
- if ((cca_unsuitable || cpu_has_dc_aliases) && ncores > 1) {
+ cores_limited = false;
+ if (cca_unsuitable || cpu_has_dc_aliases) {
+ for_each_present_cpu(c) {
+ if (cpus_are_siblings(smp_processor_id(), c))
+ continue;
+
+ set_cpu_present(c, false);
+ cores_limited = true;
+ }
+ }
+ if (cores_limited)
pr_warn("Using only one core due to %s%s%s\n",
cca_unsuitable ? "unsuitable CCA" : "",
(cca_unsuitable && cpu_has_dc_aliases) ? " & " : "",
cpu_has_dc_aliases ? "dcache aliasing" : "");
- for_each_present_cpu(c) {
- if (cpu_data[c].core)
- set_cpu_present(c, false);
- }
- }
+ setup_cps_vecs();
- /*
- * Patch the start of mips_cps_core_entry to provide:
- *
- * s0 = kseg0 CCA
- */
- entry_code = (u32 *)&mips_cps_core_entry;
- uasm_i_addiu(&entry_code, 16, 0, cca);
- blast_dcache_range((unsigned long)&mips_cps_core_entry,
- (unsigned long)entry_code);
- bc_wback_inv((unsigned long)&mips_cps_core_entry,
- (void *)entry_code - (void *)&mips_cps_core_entry);
- __sync();
+ /* Allocate cluster boot configuration structs */
+ nclusters = mips_cps_numclusters();
+ mips_cps_cluster_bootcfg = kcalloc(nclusters,
+ sizeof(*mips_cps_cluster_bootcfg),
+ GFP_KERNEL);
+ if (!mips_cps_cluster_bootcfg)
+ goto err_out;
+
+ if (nclusters > 1)
+ mips_cm_update_property();
- /* Allocate core boot configuration structs */
- mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
+ for (cl = 0; cl < nclusters; cl++) {
+ /* Allocate core boot configuration structs */
+ ncores = mips_cps_numcores(cl);
+ core_bootcfg = kcalloc(ncores, sizeof(*core_bootcfg),
GFP_KERNEL);
- if (!mips_cps_core_bootcfg) {
- pr_err("Failed to allocate boot config for %u cores\n", ncores);
- goto err_out;
- }
+ if (!core_bootcfg)
+ goto err_out;
+ mips_cps_cluster_bootcfg[cl].core_config = core_bootcfg;
- /* Allocate VPE boot configuration structs */
- for (c = 0; c < ncores; c++) {
- core_vpes = core_vpe_count(c);
- mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
- sizeof(*mips_cps_core_bootcfg[c].vpe_config),
+ mips_cps_cluster_bootcfg[cl].core_power =
+ kcalloc(BITS_TO_LONGS(ncores), sizeof(unsigned long),
GFP_KERNEL);
- if (!mips_cps_core_bootcfg[c].vpe_config) {
- pr_err("Failed to allocate %u VPE boot configs\n",
- core_vpes);
+ if (!mips_cps_cluster_bootcfg[cl].core_power)
goto err_out;
+
+ /* Allocate VPE boot configuration structs */
+ for (c = 0; c < ncores; c++) {
+ int v;
+ core_vpes = core_vpe_count(cl, c);
+ core_bootcfg[c].vpe_config = kcalloc(core_vpes,
+ sizeof(*core_bootcfg[c].vpe_config),
+ GFP_KERNEL);
+ for (v = 0; v < core_vpes; v++)
+ cpumask_set_cpu(nvpe++, &mips_cps_cluster_bootcfg[cl].cpumask);
+ if (!core_bootcfg[c].vpe_config)
+ goto err_out;
}
}
- /* Mark this CPU as booted */
- atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask,
- 1 << cpu_vpe_id(&current_cpu_data));
+ /* Mark this CPU as powered up & booted */
+ cl = cpu_cluster(&current_cpu_data);
+ c = cpu_core(&current_cpu_data);
+ cluster_bootcfg = &mips_cps_cluster_bootcfg[cl];
+ cpu_smt_set_num_threads(core_vpes, core_vpes);
+ core_bootcfg = &cluster_bootcfg->core_config[c];
+ bitmap_set(cluster_bootcfg->core_power, cpu_core(&current_cpu_data), 1);
+ atomic_set(&core_bootcfg->vpe_mask, 1 << cpu_vpe_id(&current_cpu_data));
return;
err_out:
/* Clean up allocations */
- if (mips_cps_core_bootcfg) {
- for (c = 0; c < ncores; c++)
- kfree(mips_cps_core_bootcfg[c].vpe_config);
- kfree(mips_cps_core_bootcfg);
- mips_cps_core_bootcfg = NULL;
+ if (mips_cps_cluster_bootcfg) {
+ for (cl = 0; cl < nclusters; cl++) {
+ cluster_bootcfg = &mips_cps_cluster_bootcfg[cl];
+ ncores = mips_cps_numcores(cl);
+ for (c = 0; c < ncores; c++) {
+ core_bootcfg = &cluster_bootcfg->core_config[c];
+ kfree(core_bootcfg->vpe_config);
+ }
+ kfree(mips_cps_cluster_bootcfg[cl].core_config);
+ }
+ kfree(mips_cps_cluster_bootcfg);
+ mips_cps_cluster_bootcfg = NULL;
}
/* Effectively disable SMP by declaring CPUs not present */
@@ -210,27 +413,136 @@ err_out:
}
}
-static void boot_core(unsigned int core, unsigned int vpe_id)
+static void init_cluster_l2(void)
+{
+ u32 l2_cfg, l2sm_cop, result;
+
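+ /* Use the hardware cache initialisation (HCI) mechanism when available */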
+ while (!mips_cm_is_l2_hci_broken) {
+ l2_cfg = read_gcr_redir_l2_ram_config();
+
+ /* If HCI is not supported, use the state machine below */
+ if (!(l2_cfg & CM_GCR_L2_RAM_CONFIG_PRESENT))
+ break;
+ if (!(l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_SUPPORTED))
+ break;
+
+ /* If the HCI_DONE bit is set, we're finished */
+ if (l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_DONE)
+ return;
+ }
+
+ l2sm_cop = read_gcr_redir_l2sm_cop();
+ if (WARN(!(l2sm_cop & CM_GCR_L2SM_COP_PRESENT),
+ "L2 init not supported on this system yet"))
+ return;
+
+ /* Clear L2 tag registers */
+ write_gcr_redir_l2_tag_state(0);
+ write_gcr_redir_l2_ecc(0);
+
+ /* Ensure the L2 tag writes complete before the state machine starts */
+ mb();
+
+ /* Wait for the L2 state machine to be idle */
+ do {
+ l2sm_cop = read_gcr_redir_l2sm_cop();
+ } while (l2sm_cop & CM_GCR_L2SM_COP_RUNNING);
+
+ /* Start a store tag operation */
+ l2sm_cop = CM_GCR_L2SM_COP_TYPE_IDX_STORETAG;
+ l2sm_cop <<= __ffs(CM_GCR_L2SM_COP_TYPE);
+ l2sm_cop |= CM_GCR_L2SM_COP_CMD_START;
+ write_gcr_redir_l2sm_cop(l2sm_cop);
+
+ /* Ensure the state machine starts before we poll for completion */
+ mb();
+
+ /* Wait for the operation to be complete */
+ do {
+ l2sm_cop = read_gcr_redir_l2sm_cop();
+ result = l2sm_cop & CM_GCR_L2SM_COP_RESULT;
+ result >>= __ffs(CM_GCR_L2SM_COP_RESULT);
+ } while (!result);
+
+ WARN(result != CM_GCR_L2SM_COP_RESULT_DONE_OK,
+ "L2 state machine failed cache init with error %u\n", result);
+}
+
+static void boot_core(unsigned int cluster, unsigned int core,
+ unsigned int vpe_id)
{
+ struct cluster_boot_config *cluster_cfg;
u32 access, stat, seq_state;
- unsigned timeout;
+ unsigned int timeout, ncores;
+
+ cluster_cfg = &mips_cps_cluster_bootcfg[cluster];
+ ncores = mips_cps_numcores(cluster);
+
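+ /*
+ * Booting the first core of another cluster: power it up, then
+ * mirror this cluster's CM configuration into it through the
+ * redirect block before any of its cores start running.
+ */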
+ if ((cluster != cpu_cluster(&current_cpu_data)) &&
+ bitmap_empty(cluster_cfg->core_power, ncores)) {
+ power_up_other_cluster(cluster);
+
+ mips_cm_lock_other(cluster, core, 0,
+ CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+
+ /* Ensure cluster GCRs are where we expect */
+ write_gcr_redir_base(read_gcr_base());
+ write_gcr_redir_cpc_base(read_gcr_cpc_base());
+ write_gcr_redir_gic_base(read_gcr_gic_base());
+
+ init_cluster_l2();
+
+ /* Mirror L2 configuration */
+ write_gcr_redir_l2_only_sync_base(read_gcr_l2_only_sync_base());
+ write_gcr_redir_l2_pft_control(read_gcr_l2_pft_control());
+ write_gcr_redir_l2_pft_control_b(read_gcr_l2_pft_control_b());
+
+ /* Mirror ECC/parity setup */
+ write_gcr_redir_err_control(read_gcr_err_control());
+
+ /* Set BEV base */
+ write_gcr_redir_bev_base(core_entry_reg);
+
+ mips_cm_unlock_other();
+ }
+
+ if (cluster != cpu_cluster(&current_cpu_data)) {
+ mips_cm_lock_other(cluster, core, 0,
+ CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+
+ /* Ensure the core can access the GCRs */
+ access = read_gcr_redir_access();
+ access |= BIT(core);
+ write_gcr_redir_access(access);
+
+ mips_cm_unlock_other();
+ } else {
+ /* Ensure the core can access the GCRs */
+ access = read_gcr_access();
+ access |= BIT(core);
+ write_gcr_access(access);
+ }
/* Select the appropriate core */
- mips_cm_lock_other(core, 0);
+ mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
/* Set its reset vector */
- write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));
+ if (mips_cm_is64)
+ write_gcr_co_reset64_base(core_entry_reg);
+ else
+ write_gcr_co_reset_base(core_entry_reg);
/* Ensure its coherency is disabled */
write_gcr_co_coherence(0);
/* Start it with the legacy memory map and exception base */
- write_gcr_co_reset_ext_base(CM_GCR_RESET_EXT_BASE_UEB);
+ write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB);
/* Ensure the core can access the GCRs */
- access = read_gcr_access();
- access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core);
- write_gcr_access(access);
+ if (mips_cm_revision() < CM_REV_CM3)
+ set_gcr_access(1 << core);
+ else
+ set_gcr_access_cm3(1 << core);
if (mips_cpc_present()) {
/* Reset the core */
@@ -253,7 +565,8 @@ static void boot_core(unsigned int core, unsigned int vpe_id)
timeout = 100;
while (true) {
stat = read_cpc_co_stat_conf();
- seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE_MSK;
+ seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
+ seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
/* U6 == coherent execution, ie. the core is up */
if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
@@ -280,24 +593,39 @@ static void boot_core(unsigned int core, unsigned int vpe_id)
mips_cm_unlock_other();
/* The core is now powered up */
- bitmap_set(core_power, core, 1);
+ bitmap_set(cluster_cfg->core_power, core, 1);
+
+ /*
+ * Restore CM_PWRUP=0 so that the CM can power down if all the cores in
+ * the cluster do (eg. if they're all removed via hotplug).
+ */
+ if (mips_cm_revision() >= CM_REV_CM3_5) {
+ mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+ write_cpc_redir_pwrup_ctl(0);
+ mips_cm_unlock_other();
+ }
}
static void remote_vpe_boot(void *dummy)
{
- unsigned core = current_cpu_data.core;
- struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
+ unsigned int cluster = cpu_cluster(&current_cpu_data);
+ unsigned core = cpu_core(&current_cpu_data);
+ struct cluster_boot_config *cluster_cfg =
+ &mips_cps_cluster_bootcfg[cluster];
+ struct core_boot_config *core_cfg = &cluster_cfg->core_config[core];
mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
}
-static void cps_boot_secondary(int cpu, struct task_struct *idle)
+static int cps_boot_secondary(int cpu, struct task_struct *idle)
{
- unsigned core = cpu_data[cpu].core;
+ unsigned int cluster = cpu_cluster(&cpu_data[cpu]);
+ unsigned core = cpu_core(&cpu_data[cpu]);
unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
- struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
+ struct cluster_boot_config *cluster_cfg =
+ &mips_cps_cluster_bootcfg[cluster];
+ struct core_boot_config *core_cfg = &cluster_cfg->core_config[core];
struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
- unsigned long core_entry;
unsigned int remote;
int err;
@@ -309,23 +637,26 @@ static void cps_boot_secondary(int cpu, struct task_struct *idle)
preempt_disable();
- if (!test_bit(core, core_power)) {
+ if (!test_bit(core, cluster_cfg->core_power)) {
/* Boot a VPE on a powered down core */
- boot_core(core, vpe_id);
+ boot_core(cluster, core, vpe_id);
goto out;
}
if (cpu_has_vp) {
- mips_cm_lock_other(core, vpe_id);
- core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
- write_gcr_co_reset_base(core_entry);
+ mips_cm_lock_other(cluster, core, vpe_id,
+ CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+ if (mips_cm_is64)
+ write_gcr_co_reset64_base(core_entry_reg);
+ else
+ write_gcr_co_reset_base(core_entry_reg);
mips_cm_unlock_other();
}
- if (core != current_cpu_data.core) {
+ if (!cpus_are_siblings(cpu, smp_processor_id())) {
/* Boot a VPE on another powered up core */
for (remote = 0; remote < NR_CPUS; remote++) {
- if (cpu_data[remote].core != core)
+ if (!cpus_are_siblings(cpu, remote))
continue;
if (cpu_online(remote))
break;
@@ -349,16 +680,19 @@ static void cps_boot_secondary(int cpu, struct task_struct *idle)
mips_cps_boot_vpes(core_cfg, vpe_id);
out:
preempt_enable();
+ return 0;
}
static void cps_init_secondary(void)
{
+ int core = cpu_core(&current_cpu_data);
+
/* Disable MT - we only want to run 1 TC per VPE */
if (cpu_has_mipsmt)
dmt();
if (mips_cm_revision() >= CM_REV_CM3) {
- unsigned ident = gic_read_local_vp_id();
+ unsigned int ident = read_gic_vl_ident();
/*
* Ensure that our calculation of the VP ID matches up with
@@ -368,6 +702,9 @@ static void cps_init_secondary(void)
BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
}
+ if (core > 0 && !read_gcr_cl_coherence())
+ pr_warn("Core %u is not in coherent domain\n", core);
+
if (cpu_has_veic)
clear_c0_status(ST0_IM);
else
@@ -389,42 +726,89 @@ static void cps_smp_finish(void)
local_irq_enable();
}
+#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC_CORE)
+
+enum cpu_death {
+ CPU_DEATH_HALT,
+ CPU_DEATH_POWER,
+};
+
+static void cps_shutdown_this_cpu(enum cpu_death death)
+{
+ unsigned int cpu, core, vpe_id;
+
+ cpu = smp_processor_id();
+ core = cpu_core(&cpu_data[cpu]);
+
+ if (death == CPU_DEATH_HALT) {
+ vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+
+ pr_debug("Halting core %d VP%d\n", core, vpe_id);
+ if (cpu_has_mipsmt) {
+ /* Halt this TC */
+ write_c0_tchalt(TCHALT_H);
+ instruction_hazard();
+ } else if (cpu_has_vp) {
+ write_cpc_cl_vp_stop(1 << vpe_id);
+
+ /* Ensure that the VP_STOP register is written */
+ wmb();
+ }
+ } else {
+ if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
+ pr_debug("Gating power to core %d\n", core);
+ /* Power down the core */
+ cps_pm_enter_state(CPS_PM_POWER_GATED);
+ }
+ }
+}
+
+#ifdef CONFIG_KEXEC_CORE
+
+static void cps_kexec_nonboot_cpu(void)
+{
+ if (cpu_has_mipsmt || cpu_has_vp)
+ cps_shutdown_this_cpu(CPU_DEATH_HALT);
+ else
+ cps_shutdown_this_cpu(CPU_DEATH_POWER);
+}
+
+#endif /* CONFIG_KEXEC_CORE */
+
+#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC_CORE */
+
#ifdef CONFIG_HOTPLUG_CPU
static int cps_cpu_disable(void)
{
unsigned cpu = smp_processor_id();
+ struct cluster_boot_config *cluster_cfg;
struct core_boot_config *core_cfg;
- if (!cpu)
- return -EBUSY;
-
if (!cps_pm_support_state(CPS_PM_POWER_GATED))
return -EINVAL;
- core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
+ cluster_cfg = &mips_cps_cluster_bootcfg[cpu_cluster(&current_cpu_data)];
+ core_cfg = &cluster_cfg->core_config[cpu_core(&current_cpu_data)];
atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
smp_mb__after_atomic();
set_cpu_online(cpu, false);
calculate_cpu_foreign_map();
+ irq_migrate_all_off_this_cpu();
return 0;
}
static unsigned cpu_death_sibling;
-static enum {
- CPU_DEATH_HALT,
- CPU_DEATH_POWER,
-} cpu_death;
+static enum cpu_death cpu_death;
void play_dead(void)
{
- unsigned int cpu, core, vpe_id;
+ unsigned int cpu;
local_irq_disable();
idle_task_exit();
cpu = smp_processor_id();
- core = cpu_data[cpu].core;
cpu_death = CPU_DEATH_POWER;
pr_debug("CPU%d going offline\n", cpu);
@@ -432,7 +816,7 @@ void play_dead(void)
if (cpu_has_mipsmt || cpu_has_vp) {
/* Look for another online VPE within the core */
for_each_online_cpu(cpu_death_sibling) {
- if (cpu_data[cpu_death_sibling].core != core)
+ if (!cpus_are_siblings(cpu, cpu_death_sibling))
continue;
/*
@@ -444,28 +828,9 @@ void play_dead(void)
}
}
- /* This CPU has chosen its way out */
- (void)cpu_report_death();
+ cpuhp_ap_report_dead();
- if (cpu_death == CPU_DEATH_HALT) {
- vpe_id = cpu_vpe_id(&cpu_data[cpu]);
-
- pr_debug("Halting core %d VP%d\n", core, vpe_id);
- if (cpu_has_mipsmt) {
- /* Halt this TC */
- write_c0_tchalt(TCHALT_H);
- instruction_hazard();
- } else if (cpu_has_vp) {
- write_cpc_cl_vp_stop(1 << vpe_id);
-
- /* Ensure that the VP_STOP register is written */
- wmb();
- }
- } else {
- pr_debug("Gating power to core %d\n", core);
- /* Power down the core */
- cps_pm_enter_state(CPS_PM_POWER_GATED);
- }
+ cps_shutdown_this_cpu(cpu_death);
/* This should never be reached */
panic("Failed to offline CPU %u", cpu);
@@ -486,19 +851,19 @@ static void wait_for_sibling_halt(void *ptr_cpu)
} while (!(halted & TCHALT_H));
}
-static void cps_cpu_die(unsigned int cpu)
+static void cps_cpu_die(unsigned int cpu) { }
+
+static void cps_cleanup_dead_cpu(unsigned cpu)
{
- unsigned core = cpu_data[cpu].core;
+ unsigned int cluster = cpu_cluster(&cpu_data[cpu]);
+ unsigned core = cpu_core(&cpu_data[cpu]);
unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
ktime_t fail_time;
unsigned stat;
int err;
+ struct cluster_boot_config *cluster_cfg;
- /* Wait for the cpu to choose its way out */
- if (!cpu_wait_death(cpu, 5)) {
- pr_err("CPU%u: didn't offline\n", cpu);
- return;
- }
+ cluster_cfg = &mips_cps_cluster_bootcfg[cluster];
/*
* Now wait for the CPU to actually offline. Without doing this that
@@ -519,10 +884,11 @@ static void cps_cpu_die(unsigned int cpu)
*/
fail_time = ktime_add_ms(ktime_get(), 2000);
do {
- mips_cm_lock_other(core, 0);
+ mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
mips_cpc_lock_other(core);
stat = read_cpc_co_stat_conf();
- stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
+ stat &= CPC_Cx_STAT_CONF_SEQSTATE;
+ stat >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
mips_cpc_unlock_other();
mips_cm_unlock_other();
@@ -544,12 +910,12 @@ static void cps_cpu_die(unsigned int cpu)
*/
if (WARN(ktime_after(ktime_get(), fail_time),
"CPU%u hasn't powered down, seq. state %u\n",
- cpu, stat >> CPC_Cx_STAT_CONF_SEQSTATE_SHF))
+ cpu, stat))
break;
} while (1);
/* Indicate the core is powered off */
- bitmap_clear(core_power, core, 1);
+ bitmap_clear(cluster_cfg->core_power, core, 1);
} else if (cpu_has_mipsmt) {
/*
* Have a CPU with access to the offlined CPUs registers wait
@@ -562,7 +928,7 @@ static void cps_cpu_die(unsigned int cpu)
panic("Failed to call remote sibling CPU\n");
} else if (cpu_has_vp) {
do {
- mips_cm_lock_other(core, vpe_id);
+ mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
stat = read_cpc_co_vp_running();
mips_cm_unlock_other();
} while (stat & (1 << vpe_id));
@@ -571,7 +937,7 @@ static void cps_cpu_die(unsigned int cpu)
#endif /* CONFIG_HOTPLUG_CPU */
-static struct plat_smp_ops cps_smp_ops = {
+static const struct plat_smp_ops cps_smp_ops = {
.smp_setup = cps_smp_setup,
.prepare_cpus = cps_prepare_cpus,
.boot_secondary = cps_boot_secondary,
@@ -582,12 +948,16 @@ static struct plat_smp_ops cps_smp_ops = {
#ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = cps_cpu_disable,
.cpu_die = cps_cpu_die,
+ .cleanup_dead_cpu = cps_cleanup_dead_cpu,
+#endif
+#ifdef CONFIG_KEXEC_CORE
+ .kexec_nonboot_cpu = cps_kexec_nonboot_cpu,
#endif
};
bool mips_cps_smp_in_use(void)
{
- extern struct plat_smp_ops *mp_ops;
+ extern const struct plat_smp_ops *mp_ops;
return mp_ops == &cps_smp_ops;
}
@@ -599,7 +969,7 @@ int register_cps_smp_ops(void)
}
/* check we have a GIC - we need one for IPIs */
- if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX_MSK)) {
+ if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX)) {
pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
return -ENODEV;
}