Diffstat (limited to 'arch/mips/kernel/smp-cps.c')
-rw-r--r-- | arch/mips/kernel/smp-cps.c | 288
1 file changed, 240 insertions(+), 48 deletions(-)
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 82c8f9b9573c..e85bd087467e 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -36,11 +36,55 @@ enum label_id {
 
 UASM_L_LA(_not_nmi)
 
-static DECLARE_BITMAP(core_power, NR_CPUS);
 static u64 core_entry_reg;
 static phys_addr_t cps_vec_pa;
 
-struct core_boot_config *mips_cps_core_bootcfg;
+struct cluster_boot_config *mips_cps_cluster_bootcfg;
+
+static void power_up_other_cluster(unsigned int cluster)
+{
+	u32 stat, seq_state;
+	unsigned int timeout;
+
+	mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0,
+			   CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+	stat = read_cpc_co_stat_conf();
+	mips_cm_unlock_other();
+
+	seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
+	seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
+	if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U5)
+		return;
+
+	/* Set endianness & power up the CM */
+	mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+	write_cpc_redir_sys_config(IS_ENABLED(CONFIG_CPU_BIG_ENDIAN));
+	write_cpc_redir_pwrup_ctl(1);
+	mips_cm_unlock_other();
+
+	/* Wait for the CM to start up */
+	timeout = 1000;
+	mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0,
+			   CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+	while (1) {
+		stat = read_cpc_co_stat_conf();
+		seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
+		seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
+		if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U5)
+			break;
+
+		if (timeout) {
+			mdelay(1);
+			timeout--;
+		} else {
+			pr_warn("Waiting for cluster %u CM to power up... STAT_CONF=0x%x\n",
+				cluster, stat);
+			mdelay(1000);
+		}
+	}
+
+	mips_cm_unlock_other();
+}
 
 static unsigned __init core_vpe_count(unsigned int cluster, unsigned core)
 {
@@ -178,6 +222,9 @@ static void __init cps_smp_setup(void)
 			pr_cont(",");
 		pr_cont("{");
 
+		if (mips_cm_revision() >= CM_REV_CM3_5)
+			power_up_other_cluster(cl);
+
 		ncores = mips_cps_numcores(cl);
 		for (c = 0; c < ncores; c++) {
 			core_vpes = core_vpe_count(cl, c);
@@ -205,8 +252,8 @@ static void __init cps_smp_setup(void)
 
 	/* Indicate present CPUs (CPU being synonymous with VPE) */
 	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
-		set_cpu_possible(v, cpu_cluster(&cpu_data[v]) == 0);
-		set_cpu_present(v, cpu_cluster(&cpu_data[v]) == 0);
+		set_cpu_possible(v, true);
+		set_cpu_present(v, true);
 		__cpu_number_map[v] = v;
 		__cpu_logical_map[v] = v;
 	}
@@ -214,9 +261,6 @@ static void __init cps_smp_setup(void)
 	/* Set a coherent default CCA (CWB) */
 	change_c0_config(CONF_CM_CMASK, 0x5);
 
-	/* Core 0 is powered up (we're running on it) */
-	bitmap_set(core_power, 0, 1);
-
 	/* Initialise core 0 */
 	mips_cps_core_init();
 
@@ -238,8 +282,10 @@ static void __init cps_smp_setup(void)
 
 static void __init cps_prepare_cpus(unsigned int max_cpus)
 {
-	unsigned ncores, core_vpes, c, cca;
+	unsigned int nclusters, ncores, core_vpes, c, cl, cca;
 	bool cca_unsuitable, cores_limited;
+	struct cluster_boot_config *cluster_bootcfg;
+	struct core_boot_config *core_bootcfg;
 
 	mips_mt_set_cpuoptions();
 
@@ -281,40 +327,62 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
 
 	setup_cps_vecs();
 
-	/* Allocate core boot configuration structs */
-	ncores = mips_cps_numcores(0);
-	mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
+	/* Allocate cluster boot configuration structs */
+	nclusters = mips_cps_numclusters();
+	mips_cps_cluster_bootcfg = kcalloc(nclusters,
+					   sizeof(*mips_cps_cluster_bootcfg),
+					   GFP_KERNEL);
+
+	if (nclusters > 1)
+		mips_cm_update_property();
+
+	for (cl = 0; cl < nclusters; cl++) {
+		/* Allocate core boot configuration structs */
+		ncores = mips_cps_numcores(cl);
+		core_bootcfg = kcalloc(ncores, sizeof(*core_bootcfg),
 					GFP_KERNEL);
-	if (!mips_cps_core_bootcfg) {
-		pr_err("Failed to allocate boot config for %u cores\n", ncores);
-		goto err_out;
-	}
+		if (!core_bootcfg)
+			goto err_out;
+		mips_cps_cluster_bootcfg[cl].core_config = core_bootcfg;
 
-	/* Allocate VPE boot configuration structs */
-	for (c = 0; c < ncores; c++) {
-		core_vpes = core_vpe_count(0, c);
-		mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
-				sizeof(*mips_cps_core_bootcfg[c].vpe_config),
+		mips_cps_cluster_bootcfg[cl].core_power =
+			kcalloc(BITS_TO_LONGS(ncores), sizeof(unsigned long),
 				GFP_KERNEL);
-		if (!mips_cps_core_bootcfg[c].vpe_config) {
-			pr_err("Failed to allocate %u VPE boot configs\n",
-			       core_vpes);
-			goto err_out;
+
+		/* Allocate VPE boot configuration structs */
+		for (c = 0; c < ncores; c++) {
+			core_vpes = core_vpe_count(cl, c);
+			core_bootcfg[c].vpe_config = kcalloc(core_vpes,
+					sizeof(*core_bootcfg[c].vpe_config),
+					GFP_KERNEL);
+			if (!core_bootcfg[c].vpe_config)
+				goto err_out;
 		}
 	}
 
-	/* Mark this CPU as booted */
-	atomic_set(&mips_cps_core_bootcfg[cpu_core(&current_cpu_data)].vpe_mask,
-		   1 << cpu_vpe_id(&current_cpu_data));
+	/* Mark this CPU as powered up & booted */
+	cl = cpu_cluster(&current_cpu_data);
+	c = cpu_core(&current_cpu_data);
+	cluster_bootcfg = &mips_cps_cluster_bootcfg[cl];
+	core_bootcfg = &cluster_bootcfg->core_config[c];
+	bitmap_set(cluster_bootcfg->core_power, cpu_core(&current_cpu_data), 1);
+	atomic_set(&core_bootcfg->vpe_mask, 1 << cpu_vpe_id(&current_cpu_data));
 
 	return;
 err_out:
 	/* Clean up allocations */
-	if (mips_cps_core_bootcfg) {
-		for (c = 0; c < ncores; c++)
-			kfree(mips_cps_core_bootcfg[c].vpe_config);
-		kfree(mips_cps_core_bootcfg);
-		mips_cps_core_bootcfg = NULL;
+	if (mips_cps_cluster_bootcfg) {
+		for (cl = 0; cl < nclusters; cl++) {
+			cluster_bootcfg = &mips_cps_cluster_bootcfg[cl];
+			ncores = mips_cps_numcores(cl);
+			for (c = 0; c < ncores; c++) {
+				core_bootcfg = &cluster_bootcfg->core_config[c];
+				kfree(core_bootcfg->vpe_config);
+			}
+			kfree(mips_cps_cluster_bootcfg[cl].core_config);
+		}
+		kfree(mips_cps_cluster_bootcfg);
+		mips_cps_cluster_bootcfg = NULL;
 	}
 
 	/* Effectively disable SMP by declaring CPUs not present */
@@ -325,13 +393,118 @@ err_out:
 	}
 }
 
-static void boot_core(unsigned int core, unsigned int vpe_id)
+static void init_cluster_l2(void)
 {
-	u32 stat, seq_state;
-	unsigned timeout;
+	u32 l2_cfg, l2sm_cop, result;
+
+	while (!mips_cm_is_l2_hci_broken) {
+		l2_cfg = read_gcr_redir_l2_ram_config();
+
+		/* If HCI is not supported, use the state machine below */
+		if (!(l2_cfg & CM_GCR_L2_RAM_CONFIG_PRESENT))
+			break;
+		if (!(l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_SUPPORTED))
+			break;
+
+		/* If the HCI_DONE bit is set, we're finished */
+		if (l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_DONE)
+			return;
+	}
+
+	l2sm_cop = read_gcr_redir_l2sm_cop();
+	if (WARN(!(l2sm_cop & CM_GCR_L2SM_COP_PRESENT),
+		 "L2 init not supported on this system yet"))
+		return;
+
+	/* Clear L2 tag registers */
+	write_gcr_redir_l2_tag_state(0);
+	write_gcr_redir_l2_ecc(0);
+
+	/* Ensure the L2 tag writes complete before the state machine starts */
+	mb();
+
+	/* Wait for the L2 state machine to be idle */
+	do {
+		l2sm_cop = read_gcr_redir_l2sm_cop();
+	} while (l2sm_cop & CM_GCR_L2SM_COP_RUNNING);
+
+	/* Start a store tag operation */
+	l2sm_cop = CM_GCR_L2SM_COP_TYPE_IDX_STORETAG;
+	l2sm_cop <<= __ffs(CM_GCR_L2SM_COP_TYPE);
+	l2sm_cop |= CM_GCR_L2SM_COP_CMD_START;
+	write_gcr_redir_l2sm_cop(l2sm_cop);
+
+	/* Ensure the state machine starts before we poll for completion */
+	mb();
+
+	/* Wait for the operation to be complete */
+	do {
+		l2sm_cop = read_gcr_redir_l2sm_cop();
+		result = l2sm_cop & CM_GCR_L2SM_COP_RESULT;
+		result >>= __ffs(CM_GCR_L2SM_COP_RESULT);
+	} while (!result);
+
+	WARN(result != CM_GCR_L2SM_COP_RESULT_DONE_OK,
+	     "L2 state machine failed cache init with error %u\n", result);
+}
+
+static void boot_core(unsigned int cluster, unsigned int core,
+		      unsigned int vpe_id)
+{
+	struct cluster_boot_config *cluster_cfg;
+	u32 access, stat, seq_state;
+	unsigned int timeout, ncores;
+
+	cluster_cfg = &mips_cps_cluster_bootcfg[cluster];
+	ncores = mips_cps_numcores(cluster);
+
+	if ((cluster != cpu_cluster(&current_cpu_data)) &&
+	    bitmap_empty(cluster_cfg->core_power, ncores)) {
+		power_up_other_cluster(cluster);
+
+		mips_cm_lock_other(cluster, core, 0,
+				   CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+
+		/* Ensure cluster GCRs are where we expect */
+		write_gcr_redir_base(read_gcr_base());
+		write_gcr_redir_cpc_base(read_gcr_cpc_base());
+		write_gcr_redir_gic_base(read_gcr_gic_base());
+
+		init_cluster_l2();
+
+		/* Mirror L2 configuration */
+		write_gcr_redir_l2_only_sync_base(read_gcr_l2_only_sync_base());
+		write_gcr_redir_l2_pft_control(read_gcr_l2_pft_control());
+		write_gcr_redir_l2_pft_control_b(read_gcr_l2_pft_control_b());
+
+		/* Mirror ECC/parity setup */
+		write_gcr_redir_err_control(read_gcr_err_control());
+
+		/* Set BEV base */
+		write_gcr_redir_bev_base(core_entry_reg);
+
+		mips_cm_unlock_other();
+	}
+
+	if (cluster != cpu_cluster(&current_cpu_data)) {
+		mips_cm_lock_other(cluster, core, 0,
+				   CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+
+		/* Ensure the core can access the GCRs */
+		access = read_gcr_redir_access();
+		access |= BIT(core);
+		write_gcr_redir_access(access);
+
+		mips_cm_unlock_other();
+	} else {
+		/* Ensure the core can access the GCRs */
+		access = read_gcr_access();
+		access |= BIT(core);
+		write_gcr_access(access);
+	}
 
 	/* Select the appropriate core */
-	mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+	mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
 
 	/* Set its reset vector */
 	if (mips_cm_is64)
@@ -400,30 +573,42 @@ static void boot_core(unsigned int core, unsigned int vpe_id)
 	mips_cm_unlock_other();
 
 	/* The core is now powered up */
-	bitmap_set(core_power, core, 1);
+	bitmap_set(cluster_cfg->core_power, core, 1);
+
+	/*
+	 * Restore CM_PWRUP=0 so that the CM can power down if all the cores in
+	 * the cluster do (eg. if they're all removed via hotplug).
+	 */
+	if (mips_cm_revision() >= CM_REV_CM3_5) {
+		mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+		write_cpc_redir_pwrup_ctl(0);
+		mips_cm_unlock_other();
+	}
 }
 
 static void remote_vpe_boot(void *dummy)
 {
+	unsigned int cluster = cpu_cluster(&current_cpu_data);
 	unsigned core = cpu_core(&current_cpu_data);
-	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
+	struct cluster_boot_config *cluster_cfg =
+		&mips_cps_cluster_bootcfg[cluster];
+	struct core_boot_config *core_cfg = &cluster_cfg->core_config[core];
 
 	mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
 }
 
 static int cps_boot_secondary(int cpu, struct task_struct *idle)
 {
+	unsigned int cluster = cpu_cluster(&cpu_data[cpu]);
 	unsigned core = cpu_core(&cpu_data[cpu]);
 	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
-	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
+	struct cluster_boot_config *cluster_cfg =
+		&mips_cps_cluster_bootcfg[cluster];
+	struct core_boot_config *core_cfg = &cluster_cfg->core_config[core];
 	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
 	unsigned int remote;
 	int err;
 
-	/* We don't yet support booting CPUs in other clusters */
-	if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data))
-		return -ENOSYS;
-
 	vpe_cfg->pc = (unsigned long)&smp_bootstrap;
 	vpe_cfg->sp = __KSTK_TOS(idle);
 	vpe_cfg->gp = (unsigned long)task_thread_info(idle);
@@ -432,14 +617,15 @@ static int cps_boot_secondary(int cpu, struct task_struct *idle)
 
 	preempt_disable();
 
-	if (!test_bit(core, core_power)) {
+	if (!test_bit(core, cluster_cfg->core_power)) {
 		/* Boot a VPE on a powered down core */
-		boot_core(core, vpe_id);
+		boot_core(cluster, core, vpe_id);
 		goto out;
 	}
 
 	if (cpu_has_vp) {
-		mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+		mips_cm_lock_other(cluster, core, vpe_id,
+				   CM_GCR_Cx_OTHER_BLOCK_LOCAL);
 		if (mips_cm_is64)
 			write_gcr_co_reset64_base(core_entry_reg);
 		else
@@ -576,12 +762,14 @@ static void cps_kexec_nonboot_cpu(void)
 static int cps_cpu_disable(void)
 {
 	unsigned cpu = smp_processor_id();
+	struct cluster_boot_config *cluster_cfg;
 	struct core_boot_config *core_cfg;
 
 	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
 		return -EINVAL;
 
-	core_cfg = &mips_cps_core_bootcfg[cpu_core(&current_cpu_data)];
+	cluster_cfg = &mips_cps_cluster_bootcfg[cpu_cluster(&current_cpu_data)];
+	core_cfg = &cluster_cfg->core_config[cpu_core(&current_cpu_data)];
 	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
 	smp_mb__after_atomic();
 	set_cpu_online(cpu, false);
@@ -647,11 +835,15 @@ static void cps_cpu_die(unsigned int cpu) { }
 
 static void cps_cleanup_dead_cpu(unsigned cpu)
 {
+	unsigned int cluster = cpu_cluster(&cpu_data[cpu]);
 	unsigned core = cpu_core(&cpu_data[cpu]);
 	unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
 	ktime_t fail_time;
 	unsigned stat;
 	int err;
+	struct cluster_boot_config *cluster_cfg;
+
+	cluster_cfg = &mips_cps_cluster_bootcfg[cluster];
 
 	/*
 	 * Now wait for the CPU to actually offline. Without doing this that
@@ -703,7 +895,7 @@ static void cps_cleanup_dead_cpu(unsigned cpu)
 		} while (1);
 
 		/* Indicate the core is powered off */
-		bitmap_clear(core_power, core, 1);
+		bitmap_clear(cluster_cfg->core_power, core, 1);
 	} else if (cpu_has_mipsmt) {
 		/*
 		 * Have a CPU with access to the offlined CPUs registers wait