Diffstat (limited to 'arch/mips/kernel/mips-cm.c')
-rw-r--r--	arch/mips/kernel/mips-cm.c	85
1 file changed, 63 insertions, 22 deletions
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 90f1c3df1f0e..7c9c5dc38823 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -5,15 +5,18 @@
  */
 
 #include <linux/errno.h>
+#include <linux/of.h>
 #include <linux/percpu.h>
 #include <linux/spinlock.h>
 
 #include <asm/mips-cps.h>
+#include <asm/smp-cps.h>
 #include <asm/mipsregs.h>
 
 void __iomem *mips_gcr_base;
 void __iomem *mips_cm_l2sync_base;
 int mips_cm_is64;
+bool mips_cm_is_l2_hci_broken;
 
 static char *cm2_tr[8] = {
 	"mem",	"gcr",	"gic",	"mmio",
@@ -179,13 +182,18 @@ static char *cm3_causes[32] = {
 static DEFINE_PER_CPU_ALIGNED(spinlock_t, cm_core_lock);
 static DEFINE_PER_CPU_ALIGNED(unsigned long, cm_core_lock_flags);
 
-phys_addr_t __mips_cm_phys_base(void)
+phys_addr_t __weak mips_cm_phys_base(void)
 {
-	u32 config3 = read_c0_config3();
 	unsigned long cmgcr;
 
 	/* Check the CMGCRBase register is implemented */
-	if (!(config3 & MIPS_CONF3_CMGCR))
+	if (!(read_c0_config() & MIPS_CONF_M))
+		return 0;
+
+	if (!(read_c0_config2() & MIPS_CONF_M))
+		return 0;
+
+	if (!(read_c0_config3() & MIPS_CONF3_CMGCR))
 		return 0;
 
 	/* Read the address from CMGCRBase */
@@ -193,10 +201,7 @@ phys_addr_t __mips_cm_phys_base(void)
 	return (cmgcr & MIPS_CMGCRF_BASE) << (36 - 32);
 }
 
-phys_addr_t mips_cm_phys_base(void)
-	__attribute__((weak, alias("__mips_cm_phys_base")));
-
-phys_addr_t __mips_cm_l2sync_phys_base(void)
+phys_addr_t __weak mips_cm_l2sync_phys_base(void)
 {
 	u32 base_reg;
 
@@ -212,17 +217,13 @@ phys_addr_t __mips_cm_l2sync_phys_base(void)
 	return mips_cm_phys_base() + MIPS_CM_GCR_SIZE;
 }
 
-phys_addr_t mips_cm_l2sync_phys_base(void)
-	__attribute__((weak, alias("__mips_cm_l2sync_phys_base")));
-
 static void mips_cm_probe_l2sync(void)
 {
 	unsigned major_rev;
 	phys_addr_t addr;
 
 	/* L2-only sync was introduced with CM major revision 6 */
-	major_rev = (read_gcr_rev() & CM_GCR_REV_MAJOR) >>
-		__ffs(CM_GCR_REV_MAJOR);
+	major_rev = FIELD_GET(CM_GCR_REV_MAJOR, read_gcr_rev());
 	if (major_rev < 6)
 		return;
 
@@ -239,6 +240,23 @@ static void mips_cm_probe_l2sync(void)
 	mips_cm_l2sync_base = ioremap(addr, MIPS_CM_L2SYNC_SIZE);
 }
 
+void mips_cm_update_property(void)
+{
+	struct device_node *cm_node;
+
+	cm_node = of_find_compatible_node(of_root, NULL, "mobileye,eyeq6-cm");
+	if (!cm_node)
+		return;
+	pr_info("HCI (Hardware Cache Init for the L2 cache) in GCR_L2_RAM_CONFIG from the CM3 is broken");
+	mips_cm_is_l2_hci_broken = true;
+
+	/* Disable MMID only if it was configured */
+	if (cpu_has_mmid)
+		cpu_disable_mmid();
+
+	of_node_put(cm_node);
+}
+
 int mips_cm_probe(void)
 {
 	phys_addr_t addr;
@@ -306,13 +324,15 @@ void mips_cm_lock_other(unsigned int cluster, unsigned int core,
 	preempt_disable();
 
 	if (cm_rev >= CM_REV_CM3) {
-		val = core << __ffs(CM3_GCR_Cx_OTHER_CORE);
-		val |= vp << __ffs(CM3_GCR_Cx_OTHER_VP);
+		val = FIELD_PREP(CM3_GCR_Cx_OTHER_CORE, core) |
+		      FIELD_PREP(CM3_GCR_Cx_OTHER_VP, vp);
 
 		if (cm_rev >= CM_REV_CM3_5) {
-			val |= CM_GCR_Cx_OTHER_CLUSTER_EN;
-			val |= cluster << __ffs(CM_GCR_Cx_OTHER_CLUSTER);
-			val |= block << __ffs(CM_GCR_Cx_OTHER_BLOCK);
+			if (cluster != cpu_cluster(&current_cpu_data))
+				val |= CM_GCR_Cx_OTHER_CLUSTER_EN;
+			val |= CM_GCR_Cx_OTHER_GIC_EN;
+			val |= FIELD_PREP(CM_GCR_Cx_OTHER_CLUSTER, cluster);
+			val |= FIELD_PREP(CM_GCR_Cx_OTHER_BLOCK, block);
 		} else {
 			WARN_ON(cluster != 0);
 			WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);
@@ -342,7 +362,7 @@ void mips_cm_lock_other(unsigned int cluster, unsigned int core,
 		spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
 				  per_cpu(cm_core_lock_flags, curr_core));
 
-		val = core << __ffs(CM_GCR_Cx_OTHER_CORENUM);
+		val = FIELD_PREP(CM_GCR_Cx_OTHER_CORENUM, core);
 	}
 
 	write_gcr_cl_other(val);
@@ -386,8 +406,8 @@ void mips_cm_error_report(void)
 	cm_other = read_gcr_error_mult();
 
 	if (revision < CM_REV_CM3) { /* CM2 */
-		cause = cm_error >> __ffs(CM_GCR_ERROR_CAUSE_ERRTYPE);
-		ocause = cm_other >> __ffs(CM_GCR_ERROR_MULT_ERR2ND);
+		cause = FIELD_GET(CM_GCR_ERROR_CAUSE_ERRTYPE, cm_error);
+		ocause = FIELD_GET(CM_GCR_ERROR_MULT_ERR2ND, cm_other);
 
 		if (!cause)
 			return;
@@ -445,8 +465,8 @@ void mips_cm_error_report(void)
 		ulong core_id_bits, vp_id_bits, cmd_bits, cmd_group_bits;
 		ulong cm3_cca_bits, mcp_bits, cm3_tr_bits, sched_bit;
 
-		cause = cm_error >> __ffs64(CM3_GCR_ERROR_CAUSE_ERRTYPE);
-		ocause = cm_other >> __ffs(CM_GCR_ERROR_MULT_ERR2ND);
+		cause = FIELD_GET(CM3_GCR_ERROR_CAUSE_ERRTYPE, cm_error);
+		ocause = FIELD_GET(CM_GCR_ERROR_MULT_ERR2ND, cm_other);
 
 		if (!cause)
 			return;
@@ -514,3 +534,24 @@ void mips_cm_error_report(void)
 	/* reprime cause register */
 	write_gcr_error_cause(cm_error);
 }
+
+unsigned int mips_cps_first_online_in_cluster(int *first_cpu)
+{
+	unsigned int local_cl = cpu_cluster(&current_cpu_data);
+	struct cpumask *local_cl_mask;
+
+	/*
+	 * mips_cps_cluster_bootcfg is allocated in cps_prepare_cpus. If it is
+	 * not yet done, then we are so early that only one CPU is running, so
+	 * it is the first online CPU in the cluster.
+	 */
+	if (IS_ENABLED(CONFIG_MIPS_CPS) && mips_cps_cluster_bootcfg)
+		local_cl_mask = &mips_cps_cluster_bootcfg[local_cl].cpumask;
+	else
+		return true;
+
+	*first_cpu = cpumask_any_and_but(local_cl_mask,
+					 cpu_online_mask,
+					 smp_processor_id());
+	return (*first_cpu >= nr_cpu_ids);
+}
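
Note on the conversion pattern (not part of the patch): many hunks above replace open-coded "mask, then shift by __ffs(mask)" expressions with FIELD_GET()/FIELD_PREP() from <linux/bitfield.h>, which derive the shift from the mask itself. The sketch below is a minimal userspace illustration of that equivalence; the GET()/PREP() macros are simplified stand-ins for the kernel helpers, and the CM_GCR_REV_MAJOR value used here is an assumed example mask, not the real register layout.

/*
 * Userspace sketch: open-coded shift vs. FIELD_GET()/FIELD_PREP()-style
 * helpers. GET()/PREP() are simplified stand-ins for <linux/bitfield.h>;
 * the mask value is illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

#define CM_GCR_REV_MAJOR	0x0000ff00u	/* example mask, bits 15:8 */

/* Trailing-zero count of the mask, i.e. what the kernel's __ffs(mask) gives. */
#define MASK_SHIFT(mask)	(__builtin_ctz(mask))

/* Simplified equivalents of FIELD_GET()/FIELD_PREP(). */
#define GET(mask, reg)		(((reg) & (mask)) >> MASK_SHIFT(mask))
#define PREP(mask, val)		(((uint32_t)(val) << MASK_SHIFT(mask)) & (mask))

int main(void)
{
	uint32_t gcr_rev = 0x00000905;	/* pretend register value: major rev 9 */

	/* Old style, as removed by the patch: mask then shift by __ffs(). */
	uint32_t major_old = (gcr_rev & CM_GCR_REV_MAJOR) >> MASK_SHIFT(CM_GCR_REV_MAJOR);

	/* New style: the helper derives the shift from the mask. */
	uint32_t major_new = GET(CM_GCR_REV_MAJOR, gcr_rev);

	printf("old=%u new=%u prep=0x%08x\n",
	       major_old, major_new, PREP(CM_GCR_REV_MAJOR, 9));
	return 0;
}

Both forms yield 9 for this example value; the helper form simply keeps the mask as the single source of truth instead of repeating it in a hand-written shift.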
