Diffstat (limited to 'arch/arm/mach-exynos')
-rw-r--r--  arch/arm/mach-exynos/Kconfig             |   2
-rw-r--r--  arch/arm/mach-exynos/common.h            |  10
-rw-r--r--  arch/arm/mach-exynos/exynos.c            |  62
-rw-r--r--  arch/arm/mach-exynos/firmware.c          |  83
-rw-r--r--  arch/arm/mach-exynos/include/mach/dma.h  |  26
-rw-r--r--  arch/arm/mach-exynos/include/mach/map.h  |   3
-rw-r--r--  arch/arm/mach-exynos/mcpm-exynos.c       | 247
-rw-r--r--  arch/arm/mach-exynos/platsmp.c           |  28
-rw-r--r--  arch/arm/mach-exynos/pm.c                | 150
-rw-r--r--  arch/arm/mach-exynos/pm_domains.c        |  57
-rw-r--r--  arch/arm/mach-exynos/regs-pmu.h          |   6
-rw-r--r--  arch/arm/mach-exynos/regs-sys.h          |  22
-rw-r--r--  arch/arm/mach-exynos/sleep.S             |  67
-rw-r--r--  arch/arm/mach-exynos/smc.h               |   9
-rw-r--r--  arch/arm/mach-exynos/suspend.c           | 241
15 files changed, 660 insertions(+), 353 deletions(-)
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 603820e5aba7..81064cd61a0a 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -123,7 +123,7 @@ config SOC_EXYNOS5800
config EXYNOS5420_MCPM
bool "Exynos5420 Multi-Cluster PM support"
depends on MCPM && SOC_EXYNOS5420
- select ARM_CCI
+ select ARM_CCI400_PORT_CTRL
select ARM_CPU_SUSPEND
help
This is needed to provide CPU and cluster power management
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
index 865f878063cc..acd5b560b728 100644
--- a/arch/arm/mach-exynos/common.h
+++ b/arch/arm/mach-exynos/common.h
@@ -13,6 +13,7 @@
#define __ARCH_ARM_MACH_EXYNOS_COMMON_H
#include <linux/of.h>
+#include <linux/platform_data/cpuidle-exynos.h>
#define EXYNOS3250_SOC_ID 0xE3472000
#define EXYNOS3_SOC_MASK 0xFFFFF000
@@ -125,6 +126,12 @@ enum {
void exynos_firmware_init(void);
+/* CPU BOOT mode flag for Exynos3250 SoC bootloader */
+#define C2_STATE (1 << 3)
+
+void exynos_set_boot_flag(unsigned int cpu, unsigned int mode);
+void exynos_clear_boot_flag(unsigned int cpu, unsigned int mode);
+
extern u32 exynos_get_eint_wake_mask(void);
#ifdef CONFIG_PM_SLEEP
@@ -150,8 +157,11 @@ extern void exynos_pm_central_suspend(void);
extern int exynos_pm_central_resume(void);
extern void exynos_enter_aftr(void);
+extern struct cpuidle_exynos_data cpuidle_coupled_exynos_data;
+
extern void s5p_init_cpu(void __iomem *cpuid_addr);
extern unsigned int samsung_rev(void);
+extern void __iomem *cpu_boot_reg_base(void);
static inline void pmu_raw_writel(u32 val, u32 offset)
{
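
The boot-flag helpers declared above are implemented in firmware.c further down in this series and consumed by exynos_enter_aftr() in pm.c. A minimal sketch of the intended call pattern on Exynos3250 (the function name example_enter_c2 is illustrative only, and the actual suspend steps are elided):

/* Sketch: bracketing a C2 (AFTR) entry with the new boot-flag helpers. */
static void example_enter_c2(unsigned int cpu)
{
	exynos_set_boot_flag(cpu, C2_STATE);	/* tell the bootloader this core idles in C2 */

	/* ... exynos_pm_central_suspend() + cpu_suspend() go here ... */

	exynos_clear_boot_flag(cpu, C2_STATE);	/* restore normal boot handling on wake-up */
}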
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index c13d0837fa8c..bcde0dd668df 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -27,20 +27,16 @@
#include <asm/mach/map.h>
#include <asm/memory.h>
+#include <mach/map.h>
+
#include "common.h"
#include "mfc.h"
#include "regs-pmu.h"
-#include "regs-sys.h"
void __iomem *pmu_base_addr;
static struct map_desc exynos4_iodesc[] __initdata = {
{
- .virtual = (unsigned long)S3C_VA_SYS,
- .pfn = __phys_to_pfn(EXYNOS4_PA_SYSCON),
- .length = SZ_64K,
- .type = MT_DEVICE,
- }, {
.virtual = (unsigned long)S5P_VA_SROMC,
.pfn = __phys_to_pfn(EXYNOS4_PA_SROMC),
.length = SZ_4K,
@@ -70,11 +66,6 @@ static struct map_desc exynos4_iodesc[] __initdata = {
static struct map_desc exynos5_iodesc[] __initdata = {
{
- .virtual = (unsigned long)S3C_VA_SYS,
- .pfn = __phys_to_pfn(EXYNOS5_PA_SYSCON),
- .length = SZ_64K,
- .type = MT_DEVICE,
- }, {
.virtual = (unsigned long)S5P_VA_SROMC,
.pfn = __phys_to_pfn(EXYNOS5_PA_SROMC),
.length = SZ_4K,
@@ -175,16 +166,14 @@ static void __init exynos_init_io(void)
exynos_map_io();
}
+/*
+ * Apparently, these SoCs are not able to wake-up from suspend using
+ * the PMU. Too bad. Should they suddenly become capable of such a
+ * feat, the matches below should be moved to suspend.c.
+ */
static const struct of_device_id exynos_dt_pmu_match[] = {
- { .compatible = "samsung,exynos3250-pmu" },
- { .compatible = "samsung,exynos4210-pmu" },
- { .compatible = "samsung,exynos4212-pmu" },
- { .compatible = "samsung,exynos4412-pmu" },
- { .compatible = "samsung,exynos4415-pmu" },
- { .compatible = "samsung,exynos5250-pmu" },
{ .compatible = "samsung,exynos5260-pmu" },
{ .compatible = "samsung,exynos5410-pmu" },
- { .compatible = "samsung,exynos5420-pmu" },
{ /*sentinel*/ },
};
@@ -195,9 +184,6 @@ static void exynos_map_pmu(void)
np = of_find_matching_node(NULL, exynos_dt_pmu_match);
if (np)
pmu_base_addr = of_iomap(np, 0);
-
- if (!pmu_base_addr)
- panic("failed to find exynos pmu register\n");
}
static void __init exynos_init_irq(void)
@@ -213,32 +199,6 @@ static void __init exynos_init_irq(void)
static void __init exynos_dt_machine_init(void)
{
- struct device_node *i2c_np;
- const char *i2c_compat = "samsung,s3c2440-i2c";
- unsigned int tmp;
- int id;
-
- /*
- * Exynos5's legacy i2c controller and new high speed i2c
- * controller have muxed interrupt sources. By default the
- * interrupts for 4-channel HS-I2C controller are enabled.
- * If node for first four channels of legacy i2c controller
- * are available then re-configure the interrupts via the
- * system register.
- */
- if (soc_is_exynos5()) {
- for_each_compatible_node(i2c_np, NULL, i2c_compat) {
- if (of_device_is_available(i2c_np)) {
- id = of_alias_get_id(i2c_np, "i2c");
- if (id < 4) {
- tmp = readl(EXYNOS5_SYS_I2C_CFG);
- writel(tmp & ~(0x1 << id),
- EXYNOS5_SYS_I2C_CFG);
- }
- }
- }
- }
-
/*
* This is called from smp_prepare_cpus if we've built for SMP, but
* we still need to set it up for PM and firmware ops if not.
@@ -246,10 +206,15 @@ static void __init exynos_dt_machine_init(void)
if (!IS_ENABLED(CONFIG_SMP))
exynos_sysram_init();
+#if defined(CONFIG_SMP) && defined(CONFIG_ARM_EXYNOS_CPUIDLE)
+ if (of_machine_is_compatible("samsung,exynos4210"))
+ exynos_cpuidle.dev.platform_data = &cpuidle_coupled_exynos_data;
+#endif
if (of_machine_is_compatible("samsung,exynos4210") ||
of_machine_is_compatible("samsung,exynos4212") ||
(of_machine_is_compatible("samsung,exynos4412") &&
of_machine_is_compatible("samsung,trats2")) ||
+ of_machine_is_compatible("samsung,exynos3250") ||
of_machine_is_compatible("samsung,exynos5250"))
platform_device_register(&exynos_cpuidle);
@@ -258,7 +223,7 @@ static void __init exynos_dt_machine_init(void)
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
-static char const *exynos_dt_compat[] __initconst = {
+static char const *const exynos_dt_compat[] __initconst = {
"samsung,exynos3",
"samsung,exynos3250",
"samsung,exynos4",
@@ -282,6 +247,7 @@ static void __init exynos_reserve(void)
"samsung,mfc-v5",
"samsung,mfc-v6",
"samsung,mfc-v7",
+ "samsung,mfc-v8",
};
for (i = 0; i < ARRAY_SIZE(mfc_mem); i++)
diff --git a/arch/arm/mach-exynos/firmware.c b/arch/arm/mach-exynos/firmware.c
index 766f57d2f029..1bd35763f12e 100644
--- a/arch/arm/mach-exynos/firmware.c
+++ b/arch/arm/mach-exynos/firmware.c
@@ -17,6 +17,7 @@
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/firmware.h>
+#include <asm/hardware/cache-l2x0.h>
#include <asm/suspend.h>
#include <mach/map.h>
@@ -47,7 +48,13 @@ static int exynos_do_idle(unsigned long mode)
__raw_writel(virt_to_phys(exynos_cpu_resume_ns),
sysram_ns_base_addr + 0x24);
__raw_writel(EXYNOS_AFTR_MAGIC, sysram_ns_base_addr + 0x20);
- exynos_smc(SMC_CMD_CPU0AFTR, 0, 0, 0);
+ if (soc_is_exynos3250()) {
+ exynos_smc(SMC_CMD_SAVE, OP_TYPE_CORE,
+ SMC_POWERSTATE_IDLE, 0);
+ exynos_smc(SMC_CMD_SHUTDOWN, OP_TYPE_CLUSTER,
+ SMC_POWERSTATE_IDLE, 0);
+ } else
+ exynos_smc(SMC_CMD_CPU0AFTR, 0, 0, 0);
break;
case FW_DO_IDLE_SLEEP:
exynos_smc(SMC_CMD_SLEEP, 0, 0, 0);
@@ -136,6 +143,43 @@ static const struct firmware_ops exynos_firmware_ops = {
.resume = IS_ENABLED(CONFIG_EXYNOS_CPU_SUSPEND) ? exynos_resume : NULL,
};
+static void exynos_l2_write_sec(unsigned long val, unsigned reg)
+{
+ static int l2cache_enabled;
+
+ switch (reg) {
+ case L2X0_CTRL:
+ if (val & L2X0_CTRL_EN) {
+ /*
+ * Before the cache can be enabled, due to firmware
+ * design, SMC_CMD_L2X0INVALL must be called.
+ */
+ if (!l2cache_enabled) {
+ exynos_smc(SMC_CMD_L2X0INVALL, 0, 0, 0);
+ l2cache_enabled = 1;
+ }
+ } else {
+ l2cache_enabled = 0;
+ }
+ exynos_smc(SMC_CMD_L2X0CTRL, val, 0, 0);
+ break;
+
+ case L2X0_DEBUG_CTRL:
+ exynos_smc(SMC_CMD_L2X0DEBUG, val, 0, 0);
+ break;
+
+ default:
+ WARN_ONCE(1, "%s: ignoring write to reg 0x%x\n", __func__, reg);
+ }
+}
+
+static void exynos_l2_configure(const struct l2x0_regs *regs)
+{
+ exynos_smc(SMC_CMD_L2X0SETUP1, regs->tag_latency, regs->data_latency,
+ regs->prefetch_ctrl);
+ exynos_smc(SMC_CMD_L2X0SETUP2, regs->pwr_ctrl, regs->aux_ctrl, 0);
+}
+
void __init exynos_firmware_init(void)
{
struct device_node *nd;
@@ -155,4 +199,41 @@ void __init exynos_firmware_init(void)
pr_info("Running under secure firmware.\n");
register_firmware_ops(&exynos_firmware_ops);
+
+ /*
+ * Exynos 4 SoCs (based on Cortex A9 and equipped with L2C-310),
+ * running under secure firmware, require certain registers of L2
+ * cache controller to be written in secure mode. Here .write_sec
+ * callback is provided to perform necessary SMC calls.
+ */
+ if (IS_ENABLED(CONFIG_CACHE_L2X0) &&
+ read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) {
+ outer_cache.write_sec = exynos_l2_write_sec;
+ outer_cache.configure = exynos_l2_configure;
+ }
+}
+
+#define REG_CPU_STATE_ADDR (sysram_ns_base_addr + 0x28)
+#define BOOT_MODE_MASK 0x1f
+
+void exynos_set_boot_flag(unsigned int cpu, unsigned int mode)
+{
+ unsigned int tmp;
+
+ tmp = __raw_readl(REG_CPU_STATE_ADDR + cpu * 4);
+
+ if (mode & BOOT_MODE_MASK)
+ tmp &= ~BOOT_MODE_MASK;
+
+ tmp |= mode;
+ __raw_writel(tmp, REG_CPU_STATE_ADDR + cpu * 4);
+}
+
+void exynos_clear_boot_flag(unsigned int cpu, unsigned int mode)
+{
+ unsigned int tmp;
+
+ tmp = __raw_readl(REG_CPU_STATE_ADDR + cpu * 4);
+ tmp &= ~mode;
+ __raw_writel(tmp, REG_CPU_STATE_ADDR + cpu * 4);
}
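
The boot-flag update above is a read-modify-write on a per-CPU word in non-secure SYSRAM: if the new mode carries any bit inside BOOT_MODE_MASK, the previously stored mode bits are cleared first so that only one boot mode is active at a time. A stand-alone illustration of that masking (user-space demo, not kernel code):

#include <assert.h>

#define BOOT_MODE_MASK	0x1f
#define C2_STATE	(1 << 3)

/* Same masking as exynos_set_boot_flag(), applied to a plain value. */
static unsigned int set_boot_flag_bits(unsigned int old, unsigned int mode)
{
	if (mode & BOOT_MODE_MASK)
		old &= ~BOOT_MODE_MASK;	/* drop any previously set boot mode */
	return old | mode;
}

int main(void)
{
	/* an earlier mode bit is replaced, bits above the mask survive */
	assert(set_boot_flag_bits(0x100 | 0x1, C2_STATE) == (0x100 | C2_STATE));
	return 0;
}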
diff --git a/arch/arm/mach-exynos/include/mach/dma.h b/arch/arm/mach-exynos/include/mach/dma.h
deleted file mode 100644
index 201842a3769e..000000000000
--- a/arch/arm/mach-exynos/include/mach/dma.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2010 Samsung Electronics Co. Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __MACH_DMA_H
-#define __MACH_DMA_H
-
-/* This platform uses the common DMA API driver for PL330 */
-#include <plat/dma-pl330.h>
-
-#endif /* __MACH_DMA_H */
diff --git a/arch/arm/mach-exynos/include/mach/map.h b/arch/arm/mach-exynos/include/mach/map.h
index 1ad3f496ef56..de3ae59e1cfb 100644
--- a/arch/arm/mach-exynos/include/mach/map.h
+++ b/arch/arm/mach-exynos/include/mach/map.h
@@ -24,9 +24,6 @@
#define EXYNOS_PA_CHIPID 0x10000000
-#define EXYNOS4_PA_SYSCON 0x10010000
-#define EXYNOS5_PA_SYSCON 0x10050100
-
#define EXYNOS4_PA_CMU 0x10030000
#define EXYNOS5_PA_CMU 0x10010000
diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c
index b0d3c2e876fb..9bdf54795f05 100644
--- a/arch/arm/mach-exynos/mcpm-exynos.c
+++ b/arch/arm/mach-exynos/mcpm-exynos.c
@@ -61,25 +61,7 @@ static void __iomem *ns_sram_base_addr;
: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
"r9", "r10", "lr", "memory")
-/*
- * We can't use regular spinlocks. In the switcher case, it is possible
- * for an outbound CPU to call power_down() after its inbound counterpart
- * is already live using the same logical CPU number which trips lockdep
- * debugging.
- */
-static arch_spinlock_t exynos_mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
-static int
-cpu_use_count[EXYNOS5420_CPUS_PER_CLUSTER][EXYNOS5420_NR_CLUSTERS];
-
-#define exynos_cluster_usecnt(cluster) \
- (cpu_use_count[0][cluster] + \
- cpu_use_count[1][cluster] + \
- cpu_use_count[2][cluster] + \
- cpu_use_count[3][cluster])
-
-#define exynos_cluster_unused(cluster) !exynos_cluster_usecnt(cluster)
-
-static int exynos_power_up(unsigned int cpu, unsigned int cluster)
+static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
@@ -88,127 +70,65 @@ static int exynos_power_up(unsigned int cpu, unsigned int cluster)
cluster >= EXYNOS5420_NR_CLUSTERS)
return -EINVAL;
- /*
- * Since this is called with IRQs enabled, and no arch_spin_lock_irq
- * variant exists, we need to disable IRQs manually here.
- */
- local_irq_disable();
- arch_spin_lock(&exynos_mcpm_lock);
-
- cpu_use_count[cpu][cluster]++;
- if (cpu_use_count[cpu][cluster] == 1) {
- bool was_cluster_down =
- (exynos_cluster_usecnt(cluster) == 1);
-
- /*
- * Turn on the cluster (L2/COMMON) and then power on the
- * cores.
- */
- if (was_cluster_down)
- exynos_cluster_power_up(cluster);
-
- exynos_cpu_power_up(cpunr);
- } else if (cpu_use_count[cpu][cluster] != 2) {
- /*
- * The only possible values are:
- * 0 = CPU down
- * 1 = CPU (still) up
- * 2 = CPU requested to be up before it had a chance
- * to actually make itself down.
- * Any other value is a bug.
- */
- BUG();
- }
+ exynos_cpu_power_up(cpunr);
+ return 0;
+}
- arch_spin_unlock(&exynos_mcpm_lock);
- local_irq_enable();
+static int exynos_cluster_powerup(unsigned int cluster)
+{
+ pr_debug("%s: cluster %u\n", __func__, cluster);
+ if (cluster >= EXYNOS5420_NR_CLUSTERS)
+ return -EINVAL;
+ exynos_cluster_power_up(cluster);
return 0;
}
-/*
- * NOTE: This function requires the stack data to be visible through power down
- * and can only be executed on processors like A15 and A7 that hit the cache
- * with the C bit clear in the SCTLR register.
- */
-static void exynos_power_down(void)
+static void exynos_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
{
- unsigned int mpidr, cpu, cluster;
- bool last_man = false, skip_wfi = false;
- unsigned int cpunr;
-
- mpidr = read_cpuid_mpidr();
- cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
- cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
+ unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
cluster >= EXYNOS5420_NR_CLUSTERS);
+ exynos_cpu_power_down(cpunr);
+}
- __mcpm_cpu_going_down(cpu, cluster);
-
- arch_spin_lock(&exynos_mcpm_lock);
- BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
- cpu_use_count[cpu][cluster]--;
- if (cpu_use_count[cpu][cluster] == 0) {
- exynos_cpu_power_down(cpunr);
-
- if (exynos_cluster_unused(cluster)) {
- exynos_cluster_power_down(cluster);
- last_man = true;
- }
- } else if (cpu_use_count[cpu][cluster] == 1) {
- /*
- * A power_up request went ahead of us.
- * Even if we do not want to shut this CPU down,
- * the caller expects a certain state as if the WFI
- * was aborted. So let's continue with cache cleaning.
- */
- skip_wfi = true;
- } else {
- BUG();
- }
-
- if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
- arch_spin_unlock(&exynos_mcpm_lock);
-
- if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
- /*
- * On the Cortex-A15 we need to disable
- * L2 prefetching before flushing the cache.
- */
- asm volatile(
- "mcr p15, 1, %0, c15, c0, 3\n\t"
- "isb\n\t"
- "dsb"
- : : "r" (0x400));
- }
+static void exynos_cluster_powerdown_prepare(unsigned int cluster)
+{
+ pr_debug("%s: cluster %u\n", __func__, cluster);
+ BUG_ON(cluster >= EXYNOS5420_NR_CLUSTERS);
+ exynos_cluster_power_down(cluster);
+}
- /* Flush all cache levels for this cluster. */
- exynos_v7_exit_coherency_flush(all);
+static void exynos_cpu_cache_disable(void)
+{
+ /* Disable and flush the local CPU cache. */
+ exynos_v7_exit_coherency_flush(louis);
+}
+static void exynos_cluster_cache_disable(void)
+{
+ if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
/*
- * Disable cluster-level coherency by masking
- * incoming snoops and DVM messages:
+ * On the Cortex-A15 we need to disable
+ * L2 prefetching before flushing the cache.
*/
- cci_disable_port_by_cpu(mpidr);
-
- __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
- } else {
- arch_spin_unlock(&exynos_mcpm_lock);
-
- /* Disable and flush the local CPU cache. */
- exynos_v7_exit_coherency_flush(louis);
+ asm volatile(
+ "mcr p15, 1, %0, c15, c0, 3\n\t"
+ "isb\n\t"
+ "dsb"
+ : : "r" (0x400));
}
- __mcpm_cpu_down(cpu, cluster);
-
- /* Now we are prepared for power-down, do it: */
- if (!skip_wfi)
- wfi();
+ /* Flush all cache levels for this cluster. */
+ exynos_v7_exit_coherency_flush(all);
- /* Not dead at this point? Let our caller cope. */
+ /*
+ * Disable cluster-level coherency by masking
+ * incoming snoops and DVM messages:
+ */
+ cci_disable_port_by_cpu(read_cpuid_mpidr());
}
static int exynos_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
@@ -222,10 +142,8 @@ static int exynos_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
/* Wait for the core state to be OFF */
while (tries--) {
- if (ACCESS_ONCE(cpu_use_count[cpu][cluster]) == 0) {
- if ((exynos_cpu_power_state(cpunr) == 0))
- return 0; /* success: the CPU is halted */
- }
+ if ((exynos_cpu_power_state(cpunr) == 0))
+ return 0; /* success: the CPU is halted */
/* Otherwise, wait and retry: */
msleep(1);
@@ -234,63 +152,23 @@ static int exynos_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
return -ETIMEDOUT; /* timeout */
}
-static void exynos_powered_up(void)
-{
- unsigned int mpidr, cpu, cluster;
-
- mpidr = read_cpuid_mpidr();
- cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-
- arch_spin_lock(&exynos_mcpm_lock);
- if (cpu_use_count[cpu][cluster] == 0)
- cpu_use_count[cpu][cluster] = 1;
- arch_spin_unlock(&exynos_mcpm_lock);
-}
-
-static void exynos_suspend(u64 residency)
+static void exynos_cpu_is_up(unsigned int cpu, unsigned int cluster)
{
- unsigned int mpidr, cpunr;
-
- exynos_power_down();
-
- /*
- * Execution reaches here only if cpu did not power down.
- * Hence roll back the changes done in exynos_power_down function.
- *
- * CAUTION: "This function requires the stack data to be visible through
- * power down and can only be executed on processors like A15 and A7
- * that hit the cache with the C bit clear in the SCTLR register."
- */
- mpidr = read_cpuid_mpidr();
- cpunr = exynos_pmu_cpunr(mpidr);
-
- exynos_cpu_power_up(cpunr);
+ /* especially when resuming: make sure power control is set */
+ exynos_cpu_powerup(cpu, cluster);
}
static const struct mcpm_platform_ops exynos_power_ops = {
- .power_up = exynos_power_up,
- .power_down = exynos_power_down,
+ .cpu_powerup = exynos_cpu_powerup,
+ .cluster_powerup = exynos_cluster_powerup,
+ .cpu_powerdown_prepare = exynos_cpu_powerdown_prepare,
+ .cluster_powerdown_prepare = exynos_cluster_powerdown_prepare,
+ .cpu_cache_disable = exynos_cpu_cache_disable,
+ .cluster_cache_disable = exynos_cluster_cache_disable,
.wait_for_powerdown = exynos_wait_for_powerdown,
- .suspend = exynos_suspend,
- .powered_up = exynos_powered_up,
+ .cpu_is_up = exynos_cpu_is_up,
};
-static void __init exynos_mcpm_usage_count_init(void)
-{
- unsigned int mpidr, cpu, cluster;
-
- mpidr = read_cpuid_mpidr();
- cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-
- pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
- BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
- cluster >= EXYNOS5420_NR_CLUSTERS);
-
- cpu_use_count[cpu][cluster] = 1;
-}
-
/*
* Enable cluster-level coherency, in preparation for turning on the MMU.
*/
@@ -302,19 +180,6 @@ static void __naked exynos_pm_power_up_setup(unsigned int affinity_level)
"b cci_enable_port_for_self");
}
-static void __init exynos_cache_off(void)
-{
- if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
- /* disable L2 prefetching on the Cortex-A15 */
- asm volatile(
- "mcr p15, 1, %0, c15, c0, 3\n\t"
- "isb\n\t"
- "dsb"
- : : "r" (0x400));
- }
- exynos_v7_exit_coherency_flush(all);
-}
-
static const struct of_device_id exynos_dt_mcpm_match[] = {
{ .compatible = "samsung,exynos5420" },
{ .compatible = "samsung,exynos5800" },
@@ -370,13 +235,11 @@ static int __init exynos_mcpm_init(void)
*/
pmu_raw_writel(EXYNOS5420_SWRESET_KFC_SEL, S5P_PMU_SPARE3);
- exynos_mcpm_usage_count_init();
-
ret = mcpm_platform_register(&exynos_power_ops);
if (!ret)
ret = mcpm_sync_init(exynos_pm_power_up_setup);
if (!ret)
- ret = mcpm_loopback(exynos_cache_off); /* turn on the CCI */
+ ret = mcpm_loopback(exynos_cluster_cache_disable); /* turn on the CCI */
if (ret) {
iounmap(ns_sram_base_addr);
return ret;
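
The local arch spinlock, cpu_use_count[][] bookkeeping and last-man detection disappear here because the generic MCPM layer owns that state once a platform provides the finer-grained cpu/cluster callbacks. A stand-alone model of the sequencing the core layer performs (a simplified assumption about its behaviour, not the real arch/arm/common/mcpm_entry.c):

#include <stdio.h>

#define NR_CPUS		4
#define NR_CLUSTERS	2

static int use_count[NR_CPUS][NR_CLUSTERS];

static int cluster_unused(int cluster)
{
	int cpu, sum = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += use_count[cpu][cluster];
	return sum == 0;
}

/* Model of a power-up request: the real core runs this under its own
 * lock with IRQs disabled, then invokes the platform callbacks. */
static void power_up(int cpu, int cluster)
{
	int first_cpu_up = (use_count[cpu][cluster] == 0);
	int first_in_cluster = cluster_unused(cluster);

	use_count[cpu][cluster]++;
	if (first_cpu_up) {
		if (first_in_cluster)
			printf("cluster_powerup(%d)\n", cluster);
		printf("cpu_powerup(%d, %d)\n", cpu, cluster);
	}
}

int main(void)
{
	power_up(0, 0);		/* first core: cluster_powerup + cpu_powerup */
	power_up(1, 0);		/* cluster already up: cpu_powerup only */
	return 0;
}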
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index 7a1ebfeeeeb8..ebd135bb0995 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -126,8 +126,9 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
*/
void exynos_cpu_power_down(int cpu)
{
- if (cpu == 0 && (of_machine_is_compatible("samsung,exynos5420") ||
- of_machine_is_compatible("samsung,exynos5800"))) {
+ u32 core_conf;
+
+ if (cpu == 0 && (soc_is_exynos5420() || soc_is_exynos5800())) {
/*
* Bypass power down for CPU0 during suspend. Check for
* the SYS_PWR_REG value to decide if we are suspending
@@ -138,7 +139,10 @@ void exynos_cpu_power_down(int cpu)
if (!(val & S5P_CORE_LOCAL_PWR_EN))
return;
}
- pmu_raw_writel(0, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
+
+ core_conf = pmu_raw_readl(EXYNOS_ARM_CORE_CONFIGURATION(cpu));
+ core_conf &= ~S5P_CORE_LOCAL_PWR_EN;
+ pmu_raw_writel(core_conf, EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}
/**
@@ -149,7 +153,12 @@ void exynos_cpu_power_down(int cpu)
*/
void exynos_cpu_power_up(int cpu)
{
- pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
+ u32 core_conf = S5P_CORE_LOCAL_PWR_EN;
+
+ if (soc_is_exynos3250())
+ core_conf |= S5P_CORE_AUTOWAKEUP_EN;
+
+ pmu_raw_writel(core_conf,
EXYNOS_ARM_CORE_CONFIGURATION(cpu));
}
@@ -194,7 +203,7 @@ int exynos_cluster_power_state(int cluster)
S5P_CORE_LOCAL_PWR_EN);
}
-static inline void __iomem *cpu_boot_reg_base(void)
+void __iomem *cpu_boot_reg_base(void)
{
if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_1_1)
return pmu_base_addr + S5P_INFORM5;
@@ -227,6 +236,10 @@ static void exynos_core_restart(u32 core_id)
if (!of_machine_is_compatible("samsung,exynos3250"))
return;
+ while (!pmu_raw_readl(S5P_PMU_SPARE2))
+ udelay(10);
+ udelay(10);
+
val = pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(core_id));
val |= S5P_CORE_WAKEUP_FROM_LOCAL_CFG;
pmu_raw_writel(val, EXYNOS_ARM_CORE_STATUS(core_id));
@@ -347,7 +360,10 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
call_firmware_op(cpu_boot, core_id);
- arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+ if (soc_is_exynos3250())
+ dsb_sev();
+ else
+ arch_send_wakeup_ipi_mask(cpumask_of(cpu));
if (pen_release == -1)
break;
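
exynos_cpu_power_down() now does a read-modify-write instead of writing 0, so only the S5P_CORE_LOCAL_PWR_EN field is cleared and other bits in the configuration register, such as the S5P_CORE_AUTOWAKEUP_EN bit that the power-up path sets on Exynos3250, are left intact. A stand-alone illustration of the difference:

#include <assert.h>
#include <stdint.h>

#define S5P_CORE_LOCAL_PWR_EN	0x3u
#define S5P_CORE_AUTOWAKEUP_EN	(1u << 31)

int main(void)
{
	uint32_t reg = S5P_CORE_AUTOWAKEUP_EN | S5P_CORE_LOCAL_PWR_EN;

	/* old path: reg = 0;  -- would also clear the auto-wakeup bit */
	reg &= ~S5P_CORE_LOCAL_PWR_EN;	/* new path: clear the power field only */

	assert(reg == S5P_CORE_AUTOWAKEUP_EN);
	return 0;
}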
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index 86f3ecd88f78..cc75ab448be3 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -23,12 +23,13 @@
#include <asm/smp_scu.h>
#include <asm/suspend.h>
+#include <mach/map.h>
+
#include <plat/pm-common.h>
#include "common.h"
#include "exynos-pmu.h"
#include "regs-pmu.h"
-#include "regs-sys.h"
static inline void __iomem *exynos_boot_vector_addr(void)
{
@@ -97,10 +98,6 @@ void exynos_pm_central_suspend(void)
tmp = pmu_raw_readl(S5P_CENTRAL_SEQ_CONFIGURATION);
tmp &= ~S5P_CENTRAL_LOWPWR_CFG;
pmu_raw_writel(tmp, S5P_CENTRAL_SEQ_CONFIGURATION);
-
- /* Setting SEQ_OPTION register */
- pmu_raw_writel(S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0,
- S5P_CENTRAL_SEQ_OPTION);
}
int exynos_pm_central_resume(void)
@@ -130,6 +127,8 @@ int exynos_pm_central_resume(void)
static void exynos_set_wakeupmask(long mask)
{
pmu_raw_writel(mask, S5P_WAKEUP_MASK);
+ if (soc_is_exynos3250())
+ pmu_raw_writel(0x0, S5P_WAKEUP_MASK2);
}
static void exynos_cpu_set_boot_vector(long flags)
@@ -143,7 +142,7 @@ static int exynos_aftr_finisher(unsigned long flags)
{
int ret;
- exynos_set_wakeupmask(0x0000ff3e);
+ exynos_set_wakeupmask(soc_is_exynos3250() ? 0x40003ffe : 0x0000ff3e);
/* Set value of power down register for aftr mode */
exynos_sys_powerdown_conf(SYS_AFTR);
@@ -160,10 +159,22 @@ static int exynos_aftr_finisher(unsigned long flags)
void exynos_enter_aftr(void)
{
+ unsigned int cpuid = smp_processor_id();
+
cpu_pm_enter();
+ if (soc_is_exynos3250())
+ exynos_set_boot_flag(cpuid, C2_STATE);
+
exynos_pm_central_suspend();
+ if (of_machine_is_compatible("samsung,exynos4212") ||
+ of_machine_is_compatible("samsung,exynos4412")) {
+ /* Setting SEQ_OPTION register */
+ pmu_raw_writel(S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0,
+ S5P_CENTRAL_SEQ_OPTION);
+ }
+
cpu_suspend(0, exynos_aftr_finisher);
if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) {
@@ -174,5 +185,132 @@ void exynos_enter_aftr(void)
exynos_pm_central_resume();
+ if (soc_is_exynos3250())
+ exynos_clear_boot_flag(cpuid, C2_STATE);
+
cpu_pm_exit();
}
+
+#if defined(CONFIG_SMP) && defined(CONFIG_ARM_EXYNOS_CPUIDLE)
+static atomic_t cpu1_wakeup = ATOMIC_INIT(0);
+
+static int exynos_cpu0_enter_aftr(void)
+{
+ int ret = -1;
+
+ /*
+ * If the other cpu is powered on, we have to power it off, because
+ * the AFTR state won't work otherwise
+ */
+ if (cpu_online(1)) {
+ /*
+ * We reach a sync point with the coupled idle state, we know
+ * the other cpu will power down itself or will abort the
+ * sequence, let's wait for one of these to happen
+ */
+ while (exynos_cpu_power_state(1)) {
+ /*
+ * The other cpu may skip idle and boot back
+ * up again
+ */
+ if (atomic_read(&cpu1_wakeup))
+ goto abort;
+
+ /*
+ * The other cpu may bounce through idle and
+ * boot back up again, getting stuck in the
+ * boot rom code
+ */
+ if (__raw_readl(cpu_boot_reg_base()) == 0)
+ goto abort;
+
+ cpu_relax();
+ }
+ }
+
+ exynos_enter_aftr();
+ ret = 0;
+
+abort:
+ if (cpu_online(1)) {
+ /*
+ * Set the boot vector to something non-zero
+ */
+ __raw_writel(virt_to_phys(exynos_cpu_resume),
+ cpu_boot_reg_base());
+ dsb();
+
+ /*
+ * Turn on cpu1 and wait for it to be on
+ */
+ exynos_cpu_power_up(1);
+ while (exynos_cpu_power_state(1) != S5P_CORE_LOCAL_PWR_EN)
+ cpu_relax();
+
+ while (!atomic_read(&cpu1_wakeup)) {
+ /*
+ * Poke cpu1 out of the boot rom
+ */
+ __raw_writel(virt_to_phys(exynos_cpu_resume),
+ cpu_boot_reg_base());
+
+ arch_send_wakeup_ipi_mask(cpumask_of(1));
+ }
+ }
+
+ return ret;
+}
+
+static int exynos_wfi_finisher(unsigned long flags)
+{
+ cpu_do_idle();
+
+ return -1;
+}
+
+static int exynos_cpu1_powerdown(void)
+{
+ int ret = -1;
+
+ /*
+ * Idle sequence for cpu1
+ */
+ if (cpu_pm_enter())
+ goto cpu1_aborted;
+
+ /*
+ * Turn off cpu 1
+ */
+ exynos_cpu_power_down(1);
+
+ ret = cpu_suspend(0, exynos_wfi_finisher);
+
+ cpu_pm_exit();
+
+cpu1_aborted:
+ dsb();
+ /*
+ * Notify cpu 0 that cpu 1 is awake
+ */
+ atomic_set(&cpu1_wakeup, 1);
+
+ return ret;
+}
+
+static void exynos_pre_enter_aftr(void)
+{
+ __raw_writel(virt_to_phys(exynos_cpu_resume), cpu_boot_reg_base());
+}
+
+static void exynos_post_enter_aftr(void)
+{
+ atomic_set(&cpu1_wakeup, 0);
+}
+
+struct cpuidle_exynos_data cpuidle_coupled_exynos_data = {
+ .cpu0_enter_aftr = exynos_cpu0_enter_aftr,
+ .cpu1_powerdown = exynos_cpu1_powerdown,
+ .pre_enter_aftr = exynos_pre_enter_aftr,
+ .post_enter_aftr = exynos_post_enter_aftr,
+};
+#endif /* CONFIG_SMP && CONFIG_ARM_EXYNOS_CPUIDLE */
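
cpuidle_coupled_exynos_data packages the whole CPU0/CPU1 hand-off for the coupled cpuidle driver. A simplified sketch (assuming the conventional consumer of this platform data, not the driver's exact code) of how the four hooks divide the work when both cores enter the coupled AFTR state together:

/* Sketch: per-CPU use of the hooks exported above (simplified). */
static int example_coupled_aftr(unsigned int cpu,
				struct cpuidle_exynos_data *pd)
{
	int ret;

	pd->pre_enter_aftr();			/* publish the CPU1 resume address */

	ret = cpu ? pd->cpu1_powerdown()	/* CPU1: power itself off */
		  : pd->cpu0_enter_aftr();	/* CPU0: wait for CPU1, then enter AFTR */

	pd->post_enter_aftr();			/* clear the cpu1_wakeup flag */
	return ret;
}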
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 20f267121b3e..cbe56b35aea0 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -37,6 +37,7 @@ struct exynos_pm_domain {
struct clk *oscclk;
struct clk *clk[MAX_CLK_PER_DOMAIN];
struct clk *pclk[MAX_CLK_PER_DOMAIN];
+ struct clk *asb_clk[MAX_CLK_PER_DOMAIN];
};
static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
@@ -45,14 +46,19 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
void __iomem *base;
u32 timeout, pwr;
char *op;
+ int i;
pd = container_of(domain, struct exynos_pm_domain, pd);
base = pd->base;
+ for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+ if (IS_ERR(pd->asb_clk[i]))
+ break;
+ clk_prepare_enable(pd->asb_clk[i]);
+ }
+
/* Set oscclk before powering off a domain*/
if (!power_on) {
- int i;
-
for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
if (IS_ERR(pd->clk[i]))
break;
@@ -81,8 +87,6 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
/* Restore clocks after powering on a domain*/
if (power_on) {
- int i;
-
for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
if (IS_ERR(pd->clk[i]))
break;
@@ -92,6 +96,12 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
}
}
+ for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+ if (IS_ERR(pd->asb_clk[i]))
+ break;
+ clk_disable_unprepare(pd->asb_clk[i]);
+ }
+
return 0;
}
@@ -125,12 +135,21 @@ static __init int exynos4_pm_init_power_domain(void)
return -ENOMEM;
}
- pd->pd.name = kstrdup(np->name, GFP_KERNEL);
+ pd->pd.name = kstrdup(dev_name(dev), GFP_KERNEL);
pd->name = pd->pd.name;
pd->base = of_iomap(np, 0);
pd->pd.power_off = exynos_pd_power_off;
pd->pd.power_on = exynos_pd_power_on;
+ for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+ char clk_name[8];
+
+ snprintf(clk_name, sizeof(clk_name), "asb%d", i);
+ pd->asb_clk[i] = clk_get(dev, clk_name);
+ if (IS_ERR(pd->asb_clk[i]))
+ break;
+ }
+
pd->oscclk = clk_get(dev, "oscclk");
if (IS_ERR(pd->oscclk))
goto no_clk;
@@ -161,6 +180,34 @@ no_clk:
of_genpd_add_provider_simple(np, &pd->pd);
}
+ /* Assign the child power domains to their parents */
+ for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") {
+ struct generic_pm_domain *child_domain, *parent_domain;
+ struct of_phandle_args args;
+
+ args.np = np;
+ args.args_count = 0;
+ child_domain = of_genpd_get_from_provider(&args);
+ if (!child_domain)
+ continue;
+
+ if (of_parse_phandle_with_args(np, "power-domains",
+ "#power-domain-cells", 0, &args) != 0)
+ continue;
+
+ parent_domain = of_genpd_get_from_provider(&args);
+ if (!parent_domain)
+ continue;
+
+ if (pm_genpd_add_subdomain(parent_domain, child_domain))
+ pr_warn("%s failed to add subdomain: %s\n",
+ parent_domain->name, child_domain->name);
+ else
+ pr_info("%s has as child subdomain: %s.\n",
+ parent_domain->name, child_domain->name);
+ of_node_put(np);
+ }
+
return 0;
}
arch_initcall(exynos4_pm_init_power_domain);
diff --git a/arch/arm/mach-exynos/regs-pmu.h b/arch/arm/mach-exynos/regs-pmu.h
index b5f4406fc1b5..b7614333d296 100644
--- a/arch/arm/mach-exynos/regs-pmu.h
+++ b/arch/arm/mach-exynos/regs-pmu.h
@@ -43,12 +43,14 @@
#define S5P_WAKEUP_STAT 0x0600
#define S5P_EINT_WAKEUP_MASK 0x0604
#define S5P_WAKEUP_MASK 0x0608
+#define S5P_WAKEUP_MASK2 0x0614
#define S5P_INFORM0 0x0800
#define S5P_INFORM1 0x0804
#define S5P_INFORM5 0x0814
#define S5P_INFORM6 0x0818
#define S5P_INFORM7 0x081C
+#define S5P_PMU_SPARE2 0x0908
#define S5P_PMU_SPARE3 0x090C
#define EXYNOS_IROM_DATA2 0x0988
@@ -160,12 +162,14 @@
#define EXYNOS5_L2RSTDISABLE_VALUE BIT(3)
#define S5P_PAD_RET_MAUDIO_OPTION 0x3028
+#define S5P_PAD_RET_MMC2_OPTION 0x30c8
#define S5P_PAD_RET_GPIO_OPTION 0x3108
#define S5P_PAD_RET_UART_OPTION 0x3128
#define S5P_PAD_RET_MMCA_OPTION 0x3148
#define S5P_PAD_RET_MMCB_OPTION 0x3168
#define S5P_PAD_RET_EBIA_OPTION 0x3188
#define S5P_PAD_RET_EBIB_OPTION 0x31A8
+#define S5P_PAD_RET_SPI_OPTION 0x31c8
#define S5P_PS_HOLD_CONTROL 0x330C
#define S5P_PS_HOLD_EN (1 << 31)
@@ -180,6 +184,7 @@
#define S5P_CORE_LOCAL_PWR_EN 0x3
#define S5P_CORE_WAKEUP_FROM_LOCAL_CFG (0x3 << 8)
+#define S5P_CORE_AUTOWAKEUP_EN (1 << 31)
/* Only for EXYNOS4210 */
#define S5P_CMU_CLKSTOP_LCD1_LOWPWR 0x1154
@@ -326,6 +331,7 @@
(EXYNOS3_ARM_CORE0_OPTION + ((_nr) * 0x80))
#define EXYNOS3_ARM_COMMON_OPTION 0x2408
+#define EXYNOS3_ARM_L2_OPTION 0x2608
#define EXYNOS3_TOP_PWR_OPTION 0x2C48
#define EXYNOS3_CORE_TOP_PWR_OPTION 0x2CA8
#define EXYNOS3_XUSBXTI_DURATION 0x341C
diff --git a/arch/arm/mach-exynos/regs-sys.h b/arch/arm/mach-exynos/regs-sys.h
deleted file mode 100644
index 84332b0dd7a6..000000000000
--- a/arch/arm/mach-exynos/regs-sys.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (c) 2014 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * EXYNOS - system register definition
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_REGS_SYS_H
-#define __ASM_ARCH_REGS_SYS_H __FILE__
-
-#include <mach/map.h>
-
-#define S5P_SYSREG(x) (S3C_VA_SYS + (x))
-
-/* For EXYNOS5 */
-#define EXYNOS5_SYS_I2C_CFG S5P_SYSREG(0x0234)
-
-#endif /* __ASM_ARCH_REGS_SYS_H */
diff --git a/arch/arm/mach-exynos/sleep.S b/arch/arm/mach-exynos/sleep.S
index e3c373082bbe..cf950790fbdc 100644
--- a/arch/arm/mach-exynos/sleep.S
+++ b/arch/arm/mach-exynos/sleep.S
@@ -16,19 +16,14 @@
*/
#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/hardware/cache-l2x0.h>
#include "smc.h"
#define CPU_MASK 0xff0ffff0
#define CPU_CORTEX_A9 0x410fc090
- /*
- * The following code is located into the .data section. This is to
- * allow l2x0_regs_phys to be accessed with a relative load while we
- * can't rely on any MMU translation. We could have put l2x0_regs_phys
- * in the .text section as well, but some setups might insist on it to
- * be truly read-only. (Reference from: arch/arm/kernel/sleep.S)
- */
- .data
+ .text
.align
/*
@@ -67,16 +62,68 @@ ENTRY(exynos_cpu_resume_ns)
cmp r0, r1
bne skip_cp15
- adr r0, cp15_save_power
+ adr r0, _cp15_save_power
ldr r1, [r0]
- adr r0, cp15_save_diag
+ ldr r1, [r0, r1]
+ adr r0, _cp15_save_diag
ldr r2, [r0]
+ ldr r2, [r0, r2]
mov r0, #SMC_CMD_C15RESUME
dsb
smc #0
+#ifdef CONFIG_CACHE_L2X0
+ adr r0, 1f
+ ldr r2, [r0]
+ add r0, r2, r0
+
+ /* Check that the address has been initialised. */
+ ldr r1, [r0, #L2X0_R_PHY_BASE]
+ teq r1, #0
+ beq skip_l2x0
+
+ /* Check if controller has been enabled. */
+ ldr r2, [r1, #L2X0_CTRL]
+ tst r2, #0x1
+ bne skip_l2x0
+
+ ldr r1, [r0, #L2X0_R_TAG_LATENCY]
+ ldr r2, [r0, #L2X0_R_DATA_LATENCY]
+ ldr r3, [r0, #L2X0_R_PREFETCH_CTRL]
+ mov r0, #SMC_CMD_L2X0SETUP1
+ smc #0
+
+ /* Reload saved regs pointer because smc corrupts registers. */
+ adr r0, 1f
+ ldr r2, [r0]
+ add r0, r2, r0
+
+ ldr r1, [r0, #L2X0_R_PWR_CTRL]
+ ldr r2, [r0, #L2X0_R_AUX_CTRL]
+ mov r0, #SMC_CMD_L2X0SETUP2
+ smc #0
+
+ mov r0, #SMC_CMD_L2X0INVALL
+ smc #0
+
+ mov r1, #1
+ mov r0, #SMC_CMD_L2X0CTRL
+ smc #0
+skip_l2x0:
+#endif /* CONFIG_CACHE_L2X0 */
skip_cp15:
b cpu_resume
ENDPROC(exynos_cpu_resume_ns)
+
+ .align
+_cp15_save_power:
+ .long cp15_save_power - .
+_cp15_save_diag:
+ .long cp15_save_diag - .
+#ifdef CONFIG_CACHE_L2X0
+1: .long l2x0_saved_regs - .
+#endif /* CONFIG_CACHE_L2X0 */
+
+ .data
.globl cp15_save_diag
cp15_save_diag:
.long 0 @ cp15 diagnostic
diff --git a/arch/arm/mach-exynos/smc.h b/arch/arm/mach-exynos/smc.h
index f7b82f9c1e21..c2845717bc8f 100644
--- a/arch/arm/mach-exynos/smc.h
+++ b/arch/arm/mach-exynos/smc.h
@@ -17,6 +17,8 @@
#define SMC_CMD_SLEEP (-3)
#define SMC_CMD_CPU1BOOT (-4)
#define SMC_CMD_CPU0AFTR (-5)
+#define SMC_CMD_SAVE (-6)
+#define SMC_CMD_SHUTDOWN (-7)
/* For CP15 Access */
#define SMC_CMD_C15RESUME (-11)
/* For L2 Cache Access */
@@ -32,4 +34,11 @@ extern void exynos_smc(u32 cmd, u32 arg1, u32 arg2, u32 arg3);
#endif /* __ASSEMBLY__ */
+/* op type for SMC_CMD_SAVE and SMC_CMD_SHUTDOWN */
+#define OP_TYPE_CORE 0x0
+#define OP_TYPE_CLUSTER 0x1
+
+/* Power State required for SMC_CMD_SAVE and SMC_CMD_SHUTDOWN */
+#define SMC_POWERSTATE_IDLE 0x1
+
#endif
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index f8e7dcd17055..3e6aea7f83af 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -18,7 +18,9 @@
#include <linux/syscore_ops.h>
#include <linux/cpu_pm.h>
#include <linux/io.h>
-#include <linux/irqchip/arm-gic.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
#include <linux/err.h>
#include <linux/regulator/machine.h>
@@ -34,7 +36,6 @@
#include "common.h"
#include "regs-pmu.h"
-#include "regs-sys.h"
#include "exynos-pmu.h"
#define S5P_CHECK_SLEEP 0x00000BAD
@@ -44,8 +45,8 @@
#define EXYNOS5420_CPU_STATE 0x28
/**
- * struct exynos_wkup_irq - Exynos GIC to PMU IRQ mapping
- * @hwirq: Hardware IRQ signal of the GIC
+ * struct exynos_wkup_irq - PMU IRQ to mask mapping
+ * @hwirq: Hardware IRQ signal of the PMU
* @mask: Mask in PMU wake-up mask register
*/
struct exynos_wkup_irq {
@@ -53,10 +54,6 @@ struct exynos_wkup_irq {
u32 mask;
};
-static struct sleep_save exynos5_sys_save[] = {
- SAVE_ITEM(EXYNOS5_SYS_I2C_CFG),
-};
-
static struct sleep_save exynos_core_save[] = {
/* SROM side */
SAVE_ITEM(S5P_SROM_BW),
@@ -68,8 +65,6 @@ static struct sleep_save exynos_core_save[] = {
struct exynos_pm_data {
const struct exynos_wkup_irq *wkup_irq;
- struct sleep_save *extra_save;
- int num_extra_save;
unsigned int wake_disable_mask;
unsigned int *release_ret_regs;
@@ -80,7 +75,7 @@ struct exynos_pm_data {
int (*cpu_suspend)(unsigned long);
};
-struct exynos_pm_data *pm_data;
+static const struct exynos_pm_data *pm_data;
static int exynos5420_cpu_state;
static unsigned int exynos_pmu_spare3;
@@ -91,19 +86,25 @@ static unsigned int exynos_pmu_spare3;
static u32 exynos_irqwake_intmask = 0xffffffff;
+static const struct exynos_wkup_irq exynos3250_wkup_irq[] = {
+ { 105, BIT(1) }, /* RTC alarm */
+ { 106, BIT(2) }, /* RTC tick */
+ { /* sentinel */ },
+};
+
static const struct exynos_wkup_irq exynos4_wkup_irq[] = {
- { 76, BIT(1) }, /* RTC alarm */
- { 77, BIT(2) }, /* RTC tick */
+ { 44, BIT(1) }, /* RTC alarm */
+ { 45, BIT(2) }, /* RTC tick */
{ /* sentinel */ },
};
static const struct exynos_wkup_irq exynos5250_wkup_irq[] = {
- { 75, BIT(1) }, /* RTC alarm */
- { 76, BIT(2) }, /* RTC tick */
+ { 43, BIT(1) }, /* RTC alarm */
+ { 44, BIT(2) }, /* RTC tick */
{ /* sentinel */ },
};
-unsigned int exynos_release_ret_regs[] = {
+static unsigned int exynos_release_ret_regs[] = {
S5P_PAD_RET_MAUDIO_OPTION,
S5P_PAD_RET_GPIO_OPTION,
S5P_PAD_RET_UART_OPTION,
@@ -114,7 +115,20 @@ unsigned int exynos_release_ret_regs[] = {
REG_TABLE_END,
};
-unsigned int exynos5420_release_ret_regs[] = {
+static unsigned int exynos3250_release_ret_regs[] = {
+ S5P_PAD_RET_MAUDIO_OPTION,
+ S5P_PAD_RET_GPIO_OPTION,
+ S5P_PAD_RET_UART_OPTION,
+ S5P_PAD_RET_MMCA_OPTION,
+ S5P_PAD_RET_MMCB_OPTION,
+ S5P_PAD_RET_EBIA_OPTION,
+ S5P_PAD_RET_EBIB_OPTION,
+ S5P_PAD_RET_MMC2_OPTION,
+ S5P_PAD_RET_SPI_OPTION,
+ REG_TABLE_END,
+};
+
+static unsigned int exynos5420_release_ret_regs[] = {
EXYNOS_PAD_RET_DRAM_OPTION,
EXYNOS_PAD_RET_MAUDIO_OPTION,
EXYNOS_PAD_RET_JTAG_OPTION,
@@ -153,6 +167,113 @@ static int exynos_irq_set_wake(struct irq_data *data, unsigned int state)
return -ENOENT;
}
+static struct irq_chip exynos_pmu_chip = {
+ .name = "PMU",
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_wake = exynos_irq_set_wake,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+};
+
+static int exynos_pmu_domain_xlate(struct irq_domain *domain,
+ struct device_node *controller,
+ const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ if (domain->of_node != controller)
+ return -EINVAL; /* Shouldn't happen, really... */
+ if (intsize != 3)
+ return -EINVAL; /* Not GIC compliant */
+ if (intspec[0] != 0)
+ return -EINVAL; /* No PPI should point to this domain */
+
+ *out_hwirq = intspec[1];
+ *out_type = intspec[2];
+ return 0;
+}
+
+static int exynos_pmu_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct of_phandle_args *args = data;
+ struct of_phandle_args parent_args;
+ irq_hw_number_t hwirq;
+ int i;
+
+ if (args->args_count != 3)
+ return -EINVAL; /* Not GIC compliant */
+ if (args->args[0] != 0)
+ return -EINVAL; /* No PPI should point to this domain */
+
+ hwirq = args->args[1];
+
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &exynos_pmu_chip, NULL);
+
+ parent_args = *args;
+ parent_args.np = domain->parent->of_node;
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_args);
+}
+
+static struct irq_domain_ops exynos_pmu_domain_ops = {
+ .xlate = exynos_pmu_domain_xlate,
+ .alloc = exynos_pmu_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int __init exynos_pmu_irq_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *parent_domain, *domain;
+
+ if (!parent) {
+ pr_err("%s: no parent, giving up\n", node->full_name);
+ return -ENODEV;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("%s: unable to obtain parent domain\n", node->full_name);
+ return -ENXIO;
+ }
+
+ pmu_base_addr = of_iomap(node, 0);
+
+ if (!pmu_base_addr) {
+ pr_err("%s: failed to find exynos pmu register\n",
+ node->full_name);
+ return -ENOMEM;
+ }
+
+ domain = irq_domain_add_hierarchy(parent_domain, 0, 0,
+ node, &exynos_pmu_domain_ops,
+ NULL);
+ if (!domain) {
+ iounmap(pmu_base_addr);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+#define EXYNOS_PMU_IRQ(symbol, name) OF_DECLARE_2(irqchip, symbol, name, exynos_pmu_irq_init)
+
+EXYNOS_PMU_IRQ(exynos3250_pmu_irq, "samsung,exynos3250-pmu");
+EXYNOS_PMU_IRQ(exynos4210_pmu_irq, "samsung,exynos4210-pmu");
+EXYNOS_PMU_IRQ(exynos4212_pmu_irq, "samsung,exynos4212-pmu");
+EXYNOS_PMU_IRQ(exynos4412_pmu_irq, "samsung,exynos4412-pmu");
+EXYNOS_PMU_IRQ(exynos4415_pmu_irq, "samsung,exynos4415-pmu");
+EXYNOS_PMU_IRQ(exynos5250_pmu_irq, "samsung,exynos5250-pmu");
+EXYNOS_PMU_IRQ(exynos5420_pmu_irq, "samsung,exynos5420-pmu");
+
static int exynos_cpu_do_idle(void)
{
/* issue the standby signal into the pm unit. */
@@ -173,6 +294,12 @@ static int exynos_cpu_suspend(unsigned long arg)
return exynos_cpu_do_idle();
}
+static int exynos3250_cpu_suspend(unsigned long arg)
+{
+ flush_cache_all();
+ return exynos_cpu_do_idle();
+}
+
static int exynos5420_cpu_suspend(unsigned long arg)
{
/* MCPM works with HW CPU identifiers */
@@ -220,9 +347,22 @@ static void exynos_pm_prepare(void)
s3c_pm_do_save(exynos_core_save, ARRAY_SIZE(exynos_core_save));
- if (pm_data->extra_save)
- s3c_pm_do_save(pm_data->extra_save,
- pm_data->num_extra_save);
+ exynos_pm_enter_sleep_mode();
+
+ /* ensure at least INFORM0 has the resume address */
+ pmu_raw_writel(virt_to_phys(exynos_cpu_resume), S5P_INFORM0);
+}
+
+static void exynos3250_pm_prepare(void)
+{
+ unsigned int tmp;
+
+ /* Set wake-up mask registers */
+ exynos_pm_set_wakeup_mask();
+
+ tmp = pmu_raw_readl(EXYNOS3_ARM_L2_OPTION);
+ tmp &= ~EXYNOS5_OPTION_USE_RETENTION;
+ pmu_raw_writel(tmp, EXYNOS3_ARM_L2_OPTION);
exynos_pm_enter_sleep_mode();
@@ -282,6 +422,10 @@ static int exynos_pm_suspend(void)
{
exynos_pm_central_suspend();
+ /* Setting SEQ_OPTION register */
+ pmu_raw_writel(S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0,
+ S5P_CENTRAL_SEQ_OPTION);
+
if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
exynos_cpu_save_register();
@@ -325,10 +469,6 @@ static void exynos_pm_resume(void)
/* For release retention */
exynos_pm_release_retention();
- if (pm_data->extra_save)
- s3c_pm_do_restore_core(pm_data->extra_save,
- pm_data->num_extra_save);
-
s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
if (cpuid == ARM_CPU_PART_CORTEX_A9)
@@ -344,6 +484,28 @@ early_wakeup:
pmu_raw_writel(0x0, S5P_INFORM1);
}
+static void exynos3250_pm_resume(void)
+{
+ u32 cpuid = read_cpuid_part();
+
+ if (exynos_pm_central_resume())
+ goto early_wakeup;
+
+ /* For release retention */
+ exynos_pm_release_retention();
+
+ pmu_raw_writel(S5P_USE_STANDBY_WFI_ALL, S5P_CENTRAL_SEQ_OPTION);
+
+ if (call_firmware_op(resume) == -ENOSYS
+ && cpuid == ARM_CPU_PART_CORTEX_A9)
+ exynos_cpu_restore_register();
+
+early_wakeup:
+
+ /* Clear SLEEP mode set in INFORM1 */
+ pmu_raw_writel(0x0, S5P_INFORM1);
+}
+
static void exynos5420_prepare_pm_resume(void)
{
if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM))
@@ -483,6 +645,16 @@ static const struct platform_suspend_ops exynos_suspend_ops = {
.valid = suspend_valid_only_mem,
};
+static const struct exynos_pm_data exynos3250_pm_data = {
+ .wkup_irq = exynos3250_wkup_irq,
+ .wake_disable_mask = ((0xFF << 8) | (0x1F << 1)),
+ .release_ret_regs = exynos3250_release_ret_regs,
+ .pm_suspend = exynos_pm_suspend,
+ .pm_resume = exynos3250_pm_resume,
+ .pm_prepare = exynos3250_pm_prepare,
+ .cpu_suspend = exynos3250_cpu_suspend,
+};
+
static const struct exynos_pm_data exynos4_pm_data = {
.wkup_irq = exynos4_wkup_irq,
.wake_disable_mask = ((0xFF << 8) | (0x1F << 1)),
@@ -497,15 +669,13 @@ static const struct exynos_pm_data exynos5250_pm_data = {
.wkup_irq = exynos5250_wkup_irq,
.wake_disable_mask = ((0xFF << 8) | (0x1F << 1)),
.release_ret_regs = exynos_release_ret_regs,
- .extra_save = exynos5_sys_save,
- .num_extra_save = ARRAY_SIZE(exynos5_sys_save),
.pm_suspend = exynos_pm_suspend,
.pm_resume = exynos_pm_resume,
.pm_prepare = exynos_pm_prepare,
.cpu_suspend = exynos_cpu_suspend,
};
-static struct exynos_pm_data exynos5420_pm_data = {
+static const struct exynos_pm_data exynos5420_pm_data = {
.wkup_irq = exynos5250_wkup_irq,
.wake_disable_mask = (0x7F << 7) | (0x1F << 1),
.release_ret_regs = exynos5420_release_ret_regs,
@@ -516,8 +686,11 @@ static struct exynos_pm_data exynos5420_pm_data = {
.cpu_suspend = exynos5420_cpu_suspend,
};
-static struct of_device_id exynos_pmu_of_device_ids[] = {
+static const struct of_device_id exynos_pmu_of_device_ids[] __initconst = {
{
+ .compatible = "samsung,exynos3250-pmu",
+ .data = &exynos3250_pm_data,
+ }, {
.compatible = "samsung,exynos4210-pmu",
.data = &exynos4_pm_data,
}, {
@@ -541,17 +714,19 @@ static struct syscore_ops exynos_pm_syscore_ops;
void __init exynos_pm_init(void)
{
const struct of_device_id *match;
+ struct device_node *np;
u32 tmp;
- of_find_matching_node_and_match(NULL, exynos_pmu_of_device_ids, &match);
- if (!match) {
+ np = of_find_matching_node_and_match(NULL, exynos_pmu_of_device_ids, &match);
+ if (!np) {
pr_err("Failed to find PMU node\n");
return;
}
- pm_data = (struct exynos_pm_data *) match->data;
- /* Platform-specific GIC callback */
- gic_arch_extn.irq_set_wake = exynos_irq_set_wake;
+ if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL)))
+ pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
+
+ pm_data = (const struct exynos_pm_data *) match->data;
/* All wakeup disable */
tmp = pmu_raw_readl(S5P_WAKEUP_MASK);