Diffstat (limited to 'drivers/soc/samsung/exynos-pmu.c')
-rw-r--r--  drivers/soc/samsung/exynos-pmu.c | 456
1 file changed, 322 insertions(+), 134 deletions(-)
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index fd8b6ac06656..d58376c38179 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -5,7 +5,10 @@
//
// Exynos - CPU PMU (Power Management Unit) support
-#include <linux/arm-smccc.h>
+#include <linux/array_size.h>
+#include <linux/bitmap.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpu_pm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/mfd/core.h>
@@ -13,6 +16,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
+#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
@@ -20,18 +24,20 @@
#include "exynos-pmu.h"
-#define PMUALIVE_MASK GENMASK(13, 0)
-#define TENSOR_SET_BITS (BIT(15) | BIT(14))
-#define TENSOR_CLR_BITS BIT(15)
-#define TENSOR_SMC_PMU_SEC_REG 0x82000504
-#define TENSOR_PMUREG_READ 0
-#define TENSOR_PMUREG_WRITE 1
-#define TENSOR_PMUREG_RMW 2
-
struct exynos_pmu_context {
struct device *dev;
const struct exynos_pmu_data *pmu_data;
struct regmap *pmureg;
+ struct regmap *pmuintrgen;
+ /*
+ * Serialization lock for CPU hotplug and cpuidle ACPM hint
+ * programming. Also protects the in_cpuhp, sys_insuspend and
+ * sys_inreboot flags.
+ */
+ raw_spinlock_t cpupm_lock;
+ unsigned long *in_cpuhp;
+ bool sys_insuspend;
+ bool sys_inreboot;
};
void __iomem *pmu_base_addr;
@@ -39,109 +45,6 @@ static struct exynos_pmu_context *pmu_context;
/* forward declaration */
static struct platform_driver exynos_pmu_driver;
-/*
- * Tensor SoCs are configured so that PMU_ALIVE registers can only be written
- * from EL3, but are still read accessible. As Linux needs to write some of
- * these registers, the following functions are provided and exposed via
- * regmap.
- *
- * Note: This SMC interface is known to be implemented on gs101 and derivative
- * SoCs.
- */
-
-/* Write to a protected PMU register. */
-static int tensor_sec_reg_write(void *context, unsigned int reg,
- unsigned int val)
-{
- struct arm_smccc_res res;
- unsigned long pmu_base = (unsigned long)context;
-
- arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg,
- TENSOR_PMUREG_WRITE, val, 0, 0, 0, 0, &res);
-
- /* returns -EINVAL if access isn't allowed or 0 */
- if (res.a0)
- pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0);
-
- return (int)res.a0;
-}
-
-/* Read/Modify/Write a protected PMU register. */
-static int tensor_sec_reg_rmw(void *context, unsigned int reg,
- unsigned int mask, unsigned int val)
-{
- struct arm_smccc_res res;
- unsigned long pmu_base = (unsigned long)context;
-
- arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg,
- TENSOR_PMUREG_RMW, mask, val, 0, 0, 0, &res);
-
- /* returns -EINVAL if access isn't allowed or 0 */
- if (res.a0)
- pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0);
-
- return (int)res.a0;
-}
-
-/*
- * Read a protected PMU register. All PMU registers can be read by Linux.
- * Note: The SMC read register is not used, as only registers that can be
- * written are readable via SMC.
- */
-static int tensor_sec_reg_read(void *context, unsigned int reg,
- unsigned int *val)
-{
- *val = pmu_raw_readl(reg);
- return 0;
-}
-
-/*
- * For SoCs that have set/clear bit hardware this function can be used when
- * the PMU register will be accessed by multiple masters.
- *
- * For example, to set bits 13:8 in PMU reg offset 0x3e80
- * tensor_set_bits_atomic(ctx, 0x3e80, 0x3f00, 0x3f00);
- *
- * Set bit 8, and clear bits 13:9 PMU reg offset 0x3e80
- * tensor_set_bits_atomic(0x3e80, 0x100, 0x3f00);
- */
-static int tensor_set_bits_atomic(void *ctx, unsigned int offset, u32 val,
- u32 mask)
-{
- int ret;
- unsigned int i;
-
- for (i = 0; i < 32; i++) {
- if (!(mask & BIT(i)))
- continue;
-
- offset &= ~TENSOR_SET_BITS;
-
- if (val & BIT(i))
- offset |= TENSOR_SET_BITS;
- else
- offset |= TENSOR_CLR_BITS;
-
- ret = tensor_sec_reg_write(ctx, offset, i);
- if (ret)
- return ret;
- }
- return ret;
-}
-
-static int tensor_sec_update_bits(void *ctx, unsigned int reg,
- unsigned int mask, unsigned int val)
-{
- /*
- * Use atomic operations for PMU_ALIVE registers (offset 0~0x3FFF)
- * as the target registers can be accessed by multiple masters.
- */
- if (reg > PMUALIVE_MASK)
- return tensor_sec_reg_rmw(ctx, reg, mask, val);
-
- return tensor_set_bits_atomic(ctx, reg, val, mask);
-}
-
void pmu_raw_writel(u32 val, u32 offset)
{
writel_relaxed(val, pmu_base_addr + offset);
@@ -202,20 +105,15 @@ static const struct regmap_config regmap_smccfg = {
.reg_read = tensor_sec_reg_read,
.reg_write = tensor_sec_reg_write,
.reg_update_bits = tensor_sec_update_bits,
+ .use_raw_spinlock = true,
};
-static const struct regmap_config regmap_mmiocfg = {
- .name = "pmu_regs",
+static const struct regmap_config regmap_pmu_intr = {
+ .name = "pmu_intr_gen",
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .fast_io = true,
- .use_single_read = true,
- .use_single_write = true,
-};
-
-static const struct exynos_pmu_data gs101_pmu_data = {
- .pmu_secure = true
+ .use_raw_spinlock = true,
};
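
The gs101_pmu_data initializer removed above is not deleted outright; it presumably moves further down the file, gaining the members consumed by exynos_pmu_probe() below. A sketch under that assumption (.pmu_secure is the only member visible in this hunk; .pmu_cpuhp, .wr_table and .rd_table are implied by the probe code, and the table identifiers are hypothetical):

static const struct exynos_pmu_data gs101_pmu_data = {
	.pmu_secure = true,
	.pmu_cpuhp = true,			/* implied by exynos_pmu_probe() */
	.wr_table = &gs101_pmu_wr_table,	/* hypothetical identifier */
	.rd_table = &gs101_pmu_rd_table,	/* hypothetical identifier */
};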
/*
@@ -290,7 +188,6 @@ EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap);
struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np,
const char *propname)
{
- struct exynos_pmu_context *ctx;
struct device_node *pmu_np;
struct device *dev;
@@ -316,12 +213,266 @@ struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np,
if (!dev)
return ERR_PTR(-EPROBE_DEFER);
- ctx = dev_get_drvdata(dev);
+ put_device(dev);
- return ctx->pmureg;
+ return syscon_node_to_regmap(pmu_np);
}
EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap_by_phandle);
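
For context, a consumer driver would typically use this helper as below. This is a minimal sketch, not code from this patch; the probe function and the "samsung,pmu-syscon" property name are illustrative, and the register offset is a placeholder:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/soc/samsung/exynos-pmu.h>

static int example_client_probe(struct platform_device *pdev)
{
	struct regmap *pmureg;
	u32 val;

	/* resolve the PMU node via a phandle property on the client node */
	pmureg = exynos_get_pmu_regmap_by_phandle(pdev->dev.of_node,
						  "samsung,pmu-syscon");
	if (IS_ERR(pmureg))
		return PTR_ERR(pmureg);	/* commonly -EPROBE_DEFER */

	/* the regmap is now backed by syscon (or the SMC ops on gs101) */
	return regmap_read(pmureg, 0x0, &val);	/* offset purely illustrative */
}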
+/*
+ * CPU_INFORM register "hint" values must be programmed in addition to the
+ * standard PSCI calls for CPU hotplug and CPU idle states to function. This
+ * works around limitations in the el3mon/ACPM firmware.
+ */
+#define CPU_INFORM_CLEAR 0
+#define CPU_INFORM_C2 1
+
+/*
+ * Functions with the __gs101_cpu_pmu_ prefix are common code shared by the
+ * CPU PM notifiers (CPUIdle) and the CPU hotplug callbacks. They must be
+ * called with IRQs disabled and cpupm_lock held.
+ */
+static int __gs101_cpu_pmu_online(unsigned int cpu)
+ __must_hold(&pmu_context->cpupm_lock)
+{
+ unsigned int cpuhint = smp_processor_id();
+ u32 reg, mask;
+
+ /* clear cpu inform hint */
+ regmap_write(pmu_context->pmureg, GS101_CPU_INFORM(cpuhint),
+ CPU_INFORM_CLEAR);
+
+ mask = BIT(cpu);
+
+ regmap_update_bits(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_ENABLE,
+ mask, (0 << cpu));
+
+ regmap_read(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_UPEND, &reg);
+
+ regmap_write(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_CLEAR,
+ reg & mask);
+
+ return 0;
+}
+
+/* Called from CPU PM notifier (CPUIdle code path) with IRQs disabled */
+static int gs101_cpu_pmu_online(void)
+{
+ int cpu;
+
+ raw_spin_lock(&pmu_context->cpupm_lock);
+
+ if (pmu_context->sys_inreboot) {
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+ return NOTIFY_OK;
+ }
+
+ cpu = smp_processor_id();
+ __gs101_cpu_pmu_online(cpu);
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+
+ return NOTIFY_OK;
+}
+
+/* Called from CPU hotplug callback with IRQs enabled */
+static int gs101_cpuhp_pmu_online(unsigned int cpu)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags);
+
+ __gs101_cpu_pmu_online(cpu);
+ /*
+ * Mark this CPU as having completed hotplug; it may now enter the
+ * C2 idle state.
+ */
+ clear_bit(cpu, pmu_context->in_cpuhp);
+ raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags);
+
+ return 0;
+}
+
+/* Common function shared by both CPU hotplug and CPUIdle */
+static int __gs101_cpu_pmu_offline(unsigned int cpu)
+ __must_hold(&pmu_context->cpupm_lock)
+{
+ unsigned int cpuhint = smp_processor_id();
+ u32 reg, mask;
+
+ /* set cpu inform hint */
+ regmap_write(pmu_context->pmureg, GS101_CPU_INFORM(cpuhint),
+ CPU_INFORM_C2);
+
+ mask = BIT(cpu);
+ regmap_update_bits(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_ENABLE,
+ mask, BIT(cpu));
+
+ regmap_read(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_UPEND, &reg);
+ regmap_write(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_CLEAR,
+ reg & mask);
+
+ mask = BIT(cpu + 8);
+ regmap_read(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_UPEND, &reg);
+ regmap_write(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_CLEAR,
+ reg & mask);
+
+ return 0;
+}
+
+/* Called from CPU PM notifier (CPUIdle code path) with IRQs disabled */
+static int gs101_cpu_pmu_offline(void)
+{
+ int cpu;
+
+ raw_spin_lock(&pmu_context->cpupm_lock);
+ cpu = smp_processor_id();
+
+ if (test_bit(cpu, pmu_context->in_cpuhp)) {
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+ return NOTIFY_BAD;
+ }
+
+ /* Ignore CPU_PM_ENTER event in reboot or suspend sequence. */
+ if (pmu_context->sys_insuspend || pmu_context->sys_inreboot) {
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+ return NOTIFY_OK;
+ }
+
+ __gs101_cpu_pmu_offline(cpu);
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+
+ return NOTIFY_OK;
+}
+
+/* Called from CPU hotplug callback with IRQs enabled */
+static int gs101_cpuhp_pmu_offline(unsigned int cpu)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags);
+ /*
+ * Mark this CPU as entering hotplug. So as not to confuse ACPM, a
+ * CPU entering hotplug must not enter the C2 idle state.
+ */
+ set_bit(cpu, pmu_context->in_cpuhp);
+ __gs101_cpu_pmu_offline(cpu);
+
+ raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags);
+
+ return 0;
+}
+
+static int gs101_cpu_pm_notify_callback(struct notifier_block *self,
+ unsigned long action, void *v)
+{
+ switch (action) {
+ case CPU_PM_ENTER:
+ return gs101_cpu_pmu_offline();
+
+ case CPU_PM_EXIT:
+ return gs101_cpu_pmu_online();
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block gs101_cpu_pm_notifier = {
+ .notifier_call = gs101_cpu_pm_notify_callback,
+ /*
+ * We want to be called first, as the ACPM hint and handshake are
+ * what put the CPU into C2.
+ */
+ .priority = INT_MAX
+};
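
For orientation, these CPU PM notifications are raised by cpu_pm_enter()/cpu_pm_exit() around the firmware idle request. A minimal sketch of an idle-enter hook under that model; the function itself and the PSCI call site are illustrative, not part of this driver:

static int example_c2_enter(struct cpuidle_device *dev,
			    struct cpuidle_driver *drv, int index)
{
	/* raises CPU_PM_ENTER: gs101_cpu_pmu_offline() runs first (INT_MAX) */
	if (cpu_pm_enter())
		return -1;	/* a NOTIFY_BAD (CPU mid-hotplug) aborts C2 */

	/* the actual PSCI suspend request would be issued here */

	/* raises CPU_PM_EXIT: gs101_cpu_pmu_online() clears the hint */
	cpu_pm_exit();

	return index;
}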
+
+static int exynos_cpupm_reboot_notifier(struct notifier_block *nb,
+ unsigned long event, void *v)
+{
+ unsigned long flags;
+
+ switch (event) {
+ case SYS_POWER_OFF:
+ case SYS_RESTART:
+ raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags);
+ pmu_context->sys_inreboot = true;
+ raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block exynos_cpupm_reboot_nb = {
+ .priority = INT_MAX,
+ .notifier_call = exynos_cpupm_reboot_notifier,
+};
+
+static int setup_cpuhp_and_cpuidle(struct device *dev)
+{
+ struct device_node *intr_gen_node;
+ struct resource intrgen_res;
+ void __iomem *virt_addr;
+ int ret, cpu;
+
+ intr_gen_node = of_parse_phandle(dev->of_node,
+ "google,pmu-intr-gen-syscon", 0);
+ if (!intr_gen_node) {
+ /*
+ * To maintain support for older DTs that don't specify the syscon
+ * phandle, just issue a warning rather than failing the probe.
+ */
+ dev_warn(dev, "pmu-intr-gen syscon unavailable\n");
+ return 0;
+ }
+
+ /*
+ * To avoid lockdep issues (CPU PM notifiers use raw spinlocks), create
+ * an MMIO regmap for pmu-intr-gen that uses raw spinlocks instead of
+ * the syscon-provided regmap.
+ */
+ ret = of_address_to_resource(intr_gen_node, 0, &intrgen_res);
+ of_node_put(intr_gen_node);
+ if (ret)
+ return ret;
+
+ virt_addr = devm_ioremap(dev, intrgen_res.start,
+ resource_size(&intrgen_res));
+ if (!virt_addr)
+ return -ENOMEM;
+
+ pmu_context->pmuintrgen = devm_regmap_init_mmio(dev, virt_addr,
+ &regmap_pmu_intr);
+ if (IS_ERR(pmu_context->pmuintrgen)) {
+ dev_err(dev, "failed to initialize pmu-intr-gen regmap\n");
+ return PTR_ERR(pmu_context->pmuintrgen);
+ }
+
+ /* register custom mmio regmap with syscon */
+ ret = of_syscon_register_regmap(intr_gen_node,
+ pmu_context->pmuintrgen);
+ if (ret)
+ return ret;
+
+ pmu_context->in_cpuhp = devm_bitmap_zalloc(dev, num_possible_cpus(),
+ GFP_KERNEL);
+ if (!pmu_context->in_cpuhp)
+ return -ENOMEM;
+
+ /* initialize PMU state for all CPUs that are already online */
+ for_each_online_cpu(cpu)
+ gs101_cpuhp_pmu_online(cpu);
+
+ /* register CPU hotplug callbacks */
+ cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "soc/exynos-pmu:prepare",
+ gs101_cpuhp_pmu_online, NULL);
+
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "soc/exynos-pmu:online",
+ NULL, gs101_cpuhp_pmu_offline);
+
+ /* register CPU PM notifiers for cpuidle */
+ cpu_pm_register_notifier(&gs101_cpu_pm_notifier);
+ register_reboot_notifier(&exynos_cpupm_reboot_nb);
+ return 0;
+}
+
static int exynos_pmu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -351,25 +502,40 @@ static int exynos_pmu_probe(struct platform_device *pdev)
pmu_regmcfg = regmap_smccfg;
pmu_regmcfg.max_register = resource_size(res) -
pmu_regmcfg.reg_stride;
+ pmu_regmcfg.wr_table = pmu_context->pmu_data->wr_table;
+ pmu_regmcfg.rd_table = pmu_context->pmu_data->rd_table;
+
/* Need physical address for SMC call */
regmap = devm_regmap_init(dev, NULL,
(void *)(uintptr_t)res->start,
&pmu_regmcfg);
+
+ if (IS_ERR(regmap))
+ return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
+ "regmap init failed\n");
+
+ ret = of_syscon_register_regmap(dev->of_node, regmap);
+ if (ret)
+ return ret;
} else {
- /* All other SoCs use a MMIO regmap */
- pmu_regmcfg = regmap_mmiocfg;
- pmu_regmcfg.max_register = resource_size(res) -
- pmu_regmcfg.reg_stride;
- regmap = devm_regmap_init_mmio(dev, pmu_base_addr,
- &pmu_regmcfg);
+ /* let syscon create the MMIO regmap */
+ regmap = syscon_node_to_regmap(dev->of_node);
+ if (IS_ERR(regmap))
+ return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
+ "syscon_node_to_regmap failed\n");
}
- if (IS_ERR(regmap))
- return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
- "regmap init failed\n");
-
pmu_context->pmureg = regmap;
pmu_context->dev = dev;
+ raw_spin_lock_init(&pmu_context->cpupm_lock);
+ pmu_context->sys_inreboot = false;
+ pmu_context->sys_insuspend = false;
+
+ if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_cpuhp) {
+ ret = setup_cpuhp_and_cpuidle(dev);
+ if (ret)
+ return ret;
+ }
if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_init)
pmu_context->pmu_data->pmu_init();
@@ -388,10 +554,32 @@ static int exynos_pmu_probe(struct platform_device *pdev)
return 0;
}
+static int exynos_cpupm_suspend_noirq(struct device *dev)
+{
+ raw_spin_lock(&pmu_context->cpupm_lock);
+ pmu_context->sys_insuspend = true;
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+ return 0;
+}
+
+static int exynos_cpupm_resume_noirq(struct device *dev)
+{
+ raw_spin_lock(&pmu_context->cpupm_lock);
+ pmu_context->sys_insuspend = false;
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+ return 0;
+}
+
+static const struct dev_pm_ops cpupm_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_cpupm_suspend_noirq,
+ exynos_cpupm_resume_noirq)
+};
+
static struct platform_driver exynos_pmu_driver = {
.driver = {
.name = "exynos-pmu",
.of_match_table = exynos_pmu_of_device_ids,
+ .pm = pm_sleep_ptr(&cpupm_pm_ops),
},
.probe = exynos_pmu_probe,
};