Diffstat (limited to 'drivers/soc/ti')
-rw-r--r--  drivers/soc/ti/Kconfig              |   11
-rw-r--r--  drivers/soc/ti/Makefile             |    2
-rw-r--r--  drivers/soc/ti/k3-ringacc.c         |   33
-rw-r--r--  drivers/soc/ti/k3-socinfo.c         |    1
-rw-r--r--  drivers/soc/ti/knav_dma.c           |   16
-rw-r--r--  drivers/soc/ti/knav_qmss_queue.c    |   14
-rw-r--r--  drivers/soc/ti/omap_prm.c           |  274
-rw-r--r--  drivers/soc/ti/pm33xx.c             |   47
-rw-r--r--  drivers/soc/ti/pruss.c              |  354
-rw-r--r--  drivers/soc/ti/smartreflex.c        | 1045
-rw-r--r--  drivers/soc/ti/ti_sci_pm_domains.c  |  251
11 files changed, 1882 insertions(+), 166 deletions(-)
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
index e192fb788836..f5b82ffa637b 100644
--- a/drivers/soc/ti/Kconfig
+++ b/drivers/soc/ti/Kconfig
@@ -101,6 +101,17 @@ config TI_K3_SOCINFO
platforms to provide information about the SoC family and
variant to user space.
+config TI_PRUSS
+ tristate "TI PRU-ICSS Subsystem Platform drivers"
+ depends on SOC_AM33XX || SOC_AM43XX || SOC_DRA7XX || ARCH_KEYSTONE || ARCH_K3
+ select MFD_SYSCON
+ help
+ TI PRU-ICSS Subsystem platform specific support.
+
+ Say Y or M here to support the Programmable Realtime Unit (PRU)
+ processors on various TI SoCs. It's safe to say N here if you're
+ not interested in the PRU or if you are unsure.
+
endif # SOC_TI
config TI_SCI_INTA_MSI_DOMAIN
diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile
index 1110e5c98685..cc3c972fad2e 100644
--- a/drivers/soc/ti/Makefile
+++ b/drivers/soc/ti/Makefile
@@ -12,3 +12,5 @@ obj-$(CONFIG_TI_SCI_PM_DOMAINS) += ti_sci_pm_domains.o
obj-$(CONFIG_TI_SCI_INTA_MSI_DOMAIN) += ti_sci_inta_msi.o
obj-$(CONFIG_TI_K3_RINGACC) += k3-ringacc.o
obj-$(CONFIG_TI_K3_SOCINFO) += k3-socinfo.o
+obj-$(CONFIG_TI_PRUSS) += pruss.o
+obj-$(CONFIG_POWER_AVS_OMAP) += smartreflex.o
diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
index 6dcc21dde0cb..1147dc4c1d59 100644
--- a/drivers/soc/ti/k3-ringacc.c
+++ b/drivers/soc/ti/k3-ringacc.c
@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/sys_soc.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
@@ -208,6 +209,15 @@ struct k3_ringacc {
const struct k3_ringacc_ops *ops;
};
+/**
+ * struct k3_ringacc_soc_data - Rings accelerator SoC data
+ *
+ * @dma_ring_reset_quirk: DMA reset w/a enable
+ */
+struct k3_ringacc_soc_data {
+ unsigned dma_ring_reset_quirk:1;
+};
+
static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring)
{
return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES -
@@ -1051,9 +1061,6 @@ static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc)
return ret;
}
- ringacc->dma_ring_reset_quirk =
- of_property_read_bool(node, "ti,dma-ring-reset-quirk");
-
ringacc->tisci = ti_sci_get_by_phandle(node, "ti,sci");
if (IS_ERR(ringacc->tisci)) {
ret = PTR_ERR(ringacc->tisci);
@@ -1084,9 +1091,22 @@ static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc)
ringacc->rm_gp_range);
}
+static const struct k3_ringacc_soc_data k3_ringacc_soc_data_sr1 = {
+ .dma_ring_reset_quirk = 1,
+};
+
+static const struct soc_device_attribute k3_ringacc_socinfo[] = {
+ { .family = "AM65X",
+ .revision = "SR1.0",
+ .data = &k3_ringacc_soc_data_sr1
+ },
+ {/* sentinel */}
+};
+
static int k3_ringacc_init(struct platform_device *pdev,
struct k3_ringacc *ringacc)
{
+ const struct soc_device_attribute *soc;
void __iomem *base_fifo, *base_rt;
struct device *dev = &pdev->dev;
struct resource *res;
@@ -1103,6 +1123,13 @@ static int k3_ringacc_init(struct platform_device *pdev,
if (ret)
return ret;
+ soc = soc_device_match(k3_ringacc_socinfo);
+ if (soc && soc->data) {
+ const struct k3_ringacc_soc_data *soc_data = soc->data;
+
+ ringacc->dma_ring_reset_quirk = soc_data->dma_ring_reset_quirk;
+ }
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rt");
base_rt = devm_ioremap_resource(dev, res);
if (IS_ERR(base_rt))
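
Aside: the k3-ringacc change above drops the "ti,dma-ring-reset-quirk" DT property in favour of matching on the running SoC revision. Below is a minimal, hedged sketch of the soc_device_match() pattern it follows; the my_* names are illustrative and not part of the patch.

#include <linux/sys_soc.h>

struct my_soc_data {
	unsigned int dma_reset_quirk:1;	/* workaround needed on this revision */
};

static const struct my_soc_data my_soc_data_sr1 = {
	.dma_reset_quirk = 1,
};

static const struct soc_device_attribute my_socinfo[] = {
	{ .family = "AM65X", .revision = "SR1.0", .data = &my_soc_data_sr1 },
	{ /* sentinel */ }
};

/* Returns the quirk setting for the running SoC, 0 when no entry matches */
static unsigned int my_dma_reset_quirk(void)
{
	const struct soc_device_attribute *match;

	match = soc_device_match(my_socinfo);
	if (match && match->data) {
		const struct my_soc_data *sd = match->data;

		return sd->dma_reset_quirk;
	}

	return 0;
}
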
diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c
index af0ba5288e58..bbbc2d2b7091 100644
--- a/drivers/soc/ti/k3-socinfo.c
+++ b/drivers/soc/ti/k3-socinfo.c
@@ -39,6 +39,7 @@ static const struct k3_soc_id {
} k3_soc_ids[] = {
{ 0xBB5A, "AM65X" },
{ 0xBB64, "J721E" },
+ { 0xBB6D, "J7200" },
};
static int
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index 6285cd8efb21..8c863ecb1c60 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -355,7 +355,7 @@ static void dma_debug_show_devices(struct seq_file *s,
}
}
-static int dma_debug_show(struct seq_file *s, void *v)
+static int knav_dma_debug_show(struct seq_file *s, void *v)
{
struct knav_dma_device *dma;
@@ -370,17 +370,7 @@ static int dma_debug_show(struct seq_file *s, void *v)
return 0;
}
-static int knav_dma_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, dma_debug_show, NULL);
-}
-
-static const struct file_operations knav_dma_debug_ops = {
- .open = knav_dma_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(knav_dma_debug);
static int of_channel_match_helper(struct device_node *np, const char *name,
const char **dma_instance)
@@ -778,7 +768,7 @@ static int knav_dma_probe(struct platform_device *pdev)
}
debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL,
- &knav_dma_debug_ops);
+ &knav_dma_debug_fops);
device_ready = true;
return ret;
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index aa071d96ef36..a460f201bf8e 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -478,17 +478,7 @@ static int knav_queue_debug_show(struct seq_file *s, void *v)
return 0;
}
-static int knav_queue_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, knav_queue_debug_show, NULL);
-}
-
-static const struct file_operations knav_queue_debug_ops = {
- .open = knav_queue_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(knav_queue_debug);
static inline int knav_queue_pdsp_wait(u32 * __iomem addr, unsigned timeout,
u32 flags)
@@ -1878,7 +1868,7 @@ static int knav_queue_probe(struct platform_device *pdev)
}
debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
- &knav_queue_debug_ops);
+ &knav_queue_debug_fops);
device_ready = true;
return 0;
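
Aside: both knav conversions above rely on DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h>, which generates the single_open() boilerplate and a <name>_fops structure from a <name>_show() function, hence the rename to knav_dma_debug_show()/knav_queue_debug_show() and the switch to the *_fops symbols. A hedged sketch with an illustrative my_state name:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int my_state_show(struct seq_file *s, void *v)
{
	seq_puts(s, "example state\n");
	return 0;
}

/*
 * Expands to my_state_open() wrapping single_open(..., my_state_show, ...)
 * plus a const struct file_operations named my_state_fops.
 */
DEFINE_SHOW_ATTRIBUTE(my_state);

static void my_debugfs_init(void)
{
	debugfs_create_file("my_state", 0444, NULL, NULL, &my_state_fops);
}
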
diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c
index c9b3f9ebf0bb..980b04c38fd9 100644
--- a/drivers/soc/ti/omap_prm.c
+++ b/drivers/soc/ti/omap_prm.c
@@ -10,14 +10,39 @@
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/delay.h>
#include <linux/platform_data/ti-prm.h>
+enum omap_prm_domain_mode {
+ OMAP_PRMD_OFF,
+ OMAP_PRMD_RETENTION,
+ OMAP_PRMD_ON_INACTIVE,
+ OMAP_PRMD_ON_ACTIVE,
+};
+
+struct omap_prm_domain_map {
+ unsigned int usable_modes; /* Mask of hardware supported modes */
+ unsigned long statechange:1; /* Optional low-power state change */
+ unsigned long logicretstate:1; /* Optional logic off mode */
+};
+
+struct omap_prm_domain {
+ struct device *dev;
+ struct omap_prm *prm;
+ struct generic_pm_domain pd;
+ u16 pwrstctrl;
+ u16 pwrstst;
+ const struct omap_prm_domain_map *cap;
+ u32 pwrstctrl_saved;
+};
+
struct omap_rst_map {
s8 rst;
s8 st;
@@ -27,6 +52,9 @@ struct omap_prm_data {
u32 base;
const char *name;
const char *clkdm_name;
+ u16 pwrstctrl;
+ u16 pwrstst;
+ const struct omap_prm_domain_map *dmap;
u16 rstctrl;
u16 rstst;
const struct omap_rst_map *rstmap;
@@ -36,6 +64,7 @@ struct omap_prm_data {
struct omap_prm {
const struct omap_prm_data *data;
void __iomem *base;
+ struct omap_prm_domain *prmd;
};
struct omap_reset_data {
@@ -47,6 +76,7 @@ struct omap_reset_data {
struct device *dev;
};
+#define genpd_to_prm_domain(gpd) container_of(gpd, struct omap_prm_domain, pd)
#define to_omap_reset_data(p) container_of((p), struct omap_reset_data, rcdev)
#define OMAP_MAX_RESETS 8
@@ -58,6 +88,39 @@ struct omap_reset_data {
#define OMAP_PRM_HAS_RESETS (OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_RSTST)
+#define PRM_STATE_MAX_WAIT 10000
+#define PRM_LOGICRETSTATE BIT(2)
+#define PRM_LOWPOWERSTATECHANGE BIT(4)
+#define PRM_POWERSTATE_MASK OMAP_PRMD_ON_ACTIVE
+
+#define PRM_ST_INTRANSITION BIT(20)
+
+static const struct omap_prm_domain_map omap_prm_all = {
+ .usable_modes = BIT(OMAP_PRMD_ON_ACTIVE) | BIT(OMAP_PRMD_ON_INACTIVE) |
+ BIT(OMAP_PRMD_RETENTION) | BIT(OMAP_PRMD_OFF),
+ .statechange = 1,
+ .logicretstate = 1,
+};
+
+static const struct omap_prm_domain_map omap_prm_noinact = {
+ .usable_modes = BIT(OMAP_PRMD_ON_ACTIVE) | BIT(OMAP_PRMD_RETENTION) |
+ BIT(OMAP_PRMD_OFF),
+ .statechange = 1,
+ .logicretstate = 1,
+};
+
+static const struct omap_prm_domain_map omap_prm_nooff = {
+ .usable_modes = BIT(OMAP_PRMD_ON_ACTIVE) | BIT(OMAP_PRMD_ON_INACTIVE) |
+ BIT(OMAP_PRMD_RETENTION),
+ .statechange = 1,
+ .logicretstate = 1,
+};
+
+static const struct omap_prm_domain_map omap_prm_onoff_noauto = {
+ .usable_modes = BIT(OMAP_PRMD_ON_ACTIVE) | BIT(OMAP_PRMD_OFF),
+ .statechange = 1,
+};
+
static const struct omap_rst_map rst_map_0[] = {
{ .rst = 0, .st = 0 },
{ .rst = -1 },
@@ -78,6 +141,10 @@ static const struct omap_rst_map rst_map_012[] = {
static const struct omap_prm_data omap4_prm_data[] = {
{ .name = "tesla", .base = 0x4a306400, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
+ {
+ .name = "abe", .base = 0x4a306500,
+ .pwrstctrl = 0, .pwrstst = 0x4, .dmap = &omap_prm_all,
+ },
{ .name = "core", .base = 0x4a306700, .rstctrl = 0x210, .rstst = 0x214, .clkdm_name = "ducati", .rstmap = rst_map_012 },
{ .name = "ivahd", .base = 0x4a306f00, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012 },
{ .name = "device", .base = 0x4a307b00, .rstctrl = 0x0, .rstst = 0x4, .rstmap = rst_map_01, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
@@ -86,6 +153,10 @@ static const struct omap_prm_data omap4_prm_data[] = {
static const struct omap_prm_data omap5_prm_data[] = {
{ .name = "dsp", .base = 0x4ae06400, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
+ {
+ .name = "abe", .base = 0x4ae06500,
+ .pwrstctrl = 0, .pwrstst = 0x4, .dmap = &omap_prm_nooff,
+ },
{ .name = "core", .base = 0x4ae06700, .rstctrl = 0x210, .rstst = 0x214, .clkdm_name = "ipu", .rstmap = rst_map_012 },
{ .name = "iva", .base = 0x4ae07200, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012 },
{ .name = "device", .base = 0x4ae07c00, .rstctrl = 0x0, .rstst = 0x4, .rstmap = rst_map_01, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
@@ -119,7 +190,11 @@ static const struct omap_prm_data am3_prm_data[] = {
{ .name = "per", .base = 0x44e00c00, .rstctrl = 0x0, .rstmap = am3_per_rst_map, .flags = OMAP_PRM_HAS_RSTCTRL, .clkdm_name = "pruss_ocp" },
{ .name = "wkup", .base = 0x44e00d00, .rstctrl = 0x0, .rstst = 0xc, .rstmap = am3_wkup_rst_map, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
{ .name = "device", .base = 0x44e00f00, .rstctrl = 0x0, .rstst = 0x8, .rstmap = rst_map_01, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
- { .name = "gfx", .base = 0x44e01100, .rstctrl = 0x4, .rstst = 0x14, .rstmap = rst_map_0, .clkdm_name = "gfx_l3" },
+ {
+ .name = "gfx", .base = 0x44e01100,
+ .pwrstctrl = 0, .pwrstst = 0x10, .dmap = &omap_prm_noinact,
+ .rstctrl = 0x4, .rstst = 0x14, .rstmap = rst_map_0, .clkdm_name = "gfx_l3",
+ },
{ },
};
@@ -135,7 +210,11 @@ static const struct omap_rst_map am4_device_rst_map[] = {
};
static const struct omap_prm_data am4_prm_data[] = {
- { .name = "gfx", .base = 0x44df0400, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_0, .clkdm_name = "gfx_l3" },
+ {
+ .name = "gfx", .base = 0x44df0400,
+ .pwrstctrl = 0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_0, .clkdm_name = "gfx_l3",
+ },
{ .name = "per", .base = 0x44df0800, .rstctrl = 0x10, .rstst = 0x14, .rstmap = am4_per_rst_map, .clkdm_name = "pruss_ocp" },
{ .name = "wkup", .base = 0x44df2000, .rstctrl = 0x10, .rstst = 0x14, .rstmap = am3_wkup_rst_map, .flags = OMAP_PRM_HAS_NO_CLKDM },
{ .name = "device", .base = 0x44df4000, .rstctrl = 0x0, .rstst = 0x4, .rstmap = am4_device_rst_map, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
@@ -151,6 +230,180 @@ static const struct of_device_id omap_prm_id_table[] = {
{ },
};
+#ifdef DEBUG
+static void omap_prm_domain_show_state(struct omap_prm_domain *prmd,
+ const char *desc)
+{
+ dev_dbg(prmd->dev, "%s %s: %08x/%08x\n",
+ prmd->pd.name, desc,
+ readl_relaxed(prmd->prm->base + prmd->pwrstctrl),
+ readl_relaxed(prmd->prm->base + prmd->pwrstst));
+}
+#else
+static inline void omap_prm_domain_show_state(struct omap_prm_domain *prmd,
+ const char *desc)
+{
+}
+#endif
+
+static int omap_prm_domain_power_on(struct generic_pm_domain *domain)
+{
+ struct omap_prm_domain *prmd;
+ int ret;
+ u32 v;
+
+ prmd = genpd_to_prm_domain(domain);
+ if (!prmd->cap)
+ return 0;
+
+ omap_prm_domain_show_state(prmd, "on: previous state");
+
+ if (prmd->pwrstctrl_saved)
+ v = prmd->pwrstctrl_saved;
+ else
+ v = readl_relaxed(prmd->prm->base + prmd->pwrstctrl);
+
+ writel_relaxed(v | OMAP_PRMD_ON_ACTIVE,
+ prmd->prm->base + prmd->pwrstctrl);
+
+ /* wait for the transition bit to get cleared */
+ ret = readl_relaxed_poll_timeout(prmd->prm->base + prmd->pwrstst,
+ v, !(v & PRM_ST_INTRANSITION), 1,
+ PRM_STATE_MAX_WAIT);
+ if (ret)
+ dev_err(prmd->dev, "%s: %s timed out\n",
+ prmd->pd.name, __func__);
+
+ omap_prm_domain_show_state(prmd, "on: new state");
+
+ return ret;
+}
+
+/* No need to check for holes in the mask for the lowest mode */
+static int omap_prm_domain_find_lowest(struct omap_prm_domain *prmd)
+{
+ return __ffs(prmd->cap->usable_modes);
+}
+
+static int omap_prm_domain_power_off(struct generic_pm_domain *domain)
+{
+ struct omap_prm_domain *prmd;
+ int ret;
+ u32 v;
+
+ prmd = genpd_to_prm_domain(domain);
+ if (!prmd->cap)
+ return 0;
+
+ omap_prm_domain_show_state(prmd, "off: previous state");
+
+ v = readl_relaxed(prmd->prm->base + prmd->pwrstctrl);
+ prmd->pwrstctrl_saved = v;
+
+ v &= ~PRM_POWERSTATE_MASK;
+ v |= omap_prm_domain_find_lowest(prmd);
+
+ if (prmd->cap->statechange)
+ v |= PRM_LOWPOWERSTATECHANGE;
+ if (prmd->cap->logicretstate)
+ v &= ~PRM_LOGICRETSTATE;
+ else
+ v |= PRM_LOGICRETSTATE;
+
+ writel_relaxed(v, prmd->prm->base + prmd->pwrstctrl);
+
+ /* wait for the transition bit to get cleared */
+ ret = readl_relaxed_poll_timeout(prmd->prm->base + prmd->pwrstst,
+ v, !(v & PRM_ST_INTRANSITION), 1,
+ PRM_STATE_MAX_WAIT);
+ if (ret)
+ dev_warn(prmd->dev, "%s: %s timed out\n",
+ __func__, prmd->pd.name);
+
+ omap_prm_domain_show_state(prmd, "off: new state");
+
+ return 0;
+}
+
+static int omap_prm_domain_attach_dev(struct generic_pm_domain *domain,
+ struct device *dev)
+{
+ struct generic_pm_domain_data *genpd_data;
+ struct of_phandle_args pd_args;
+ struct omap_prm_domain *prmd;
+ struct device_node *np;
+ int ret;
+
+ prmd = genpd_to_prm_domain(domain);
+ np = dev->of_node;
+
+ ret = of_parse_phandle_with_args(np, "power-domains",
+ "#power-domain-cells", 0, &pd_args);
+ if (ret < 0)
+ return ret;
+
+ if (pd_args.args_count != 0)
+ dev_warn(dev, "%s: unsupported #power-domain-cells: %i\n",
+ prmd->pd.name, pd_args.args_count);
+
+ genpd_data = dev_gpd_data(dev);
+ genpd_data->data = NULL;
+
+ return 0;
+}
+
+static void omap_prm_domain_detach_dev(struct generic_pm_domain *domain,
+ struct device *dev)
+{
+ struct generic_pm_domain_data *genpd_data;
+
+ genpd_data = dev_gpd_data(dev);
+ genpd_data->data = NULL;
+}
+
+static int omap_prm_domain_init(struct device *dev, struct omap_prm *prm)
+{
+ struct omap_prm_domain *prmd;
+ struct device_node *np = dev->of_node;
+ const struct omap_prm_data *data;
+ const char *name;
+ int error;
+
+ if (!of_find_property(dev->of_node, "#power-domain-cells", NULL))
+ return 0;
+
+ of_node_put(dev->of_node);
+
+ prmd = devm_kzalloc(dev, sizeof(*prmd), GFP_KERNEL);
+ if (!prmd)
+ return -ENOMEM;
+
+ data = prm->data;
+ name = devm_kasprintf(dev, GFP_KERNEL, "prm_%s",
+ data->name);
+
+ prmd->dev = dev;
+ prmd->prm = prm;
+ prmd->cap = prmd->prm->data->dmap;
+ prmd->pwrstctrl = prmd->prm->data->pwrstctrl;
+ prmd->pwrstst = prmd->prm->data->pwrstst;
+
+ prmd->pd.name = name;
+ prmd->pd.power_on = omap_prm_domain_power_on;
+ prmd->pd.power_off = omap_prm_domain_power_off;
+ prmd->pd.attach_dev = omap_prm_domain_attach_dev;
+ prmd->pd.detach_dev = omap_prm_domain_detach_dev;
+
+ pm_genpd_init(&prmd->pd, NULL, true);
+ error = of_genpd_add_provider_simple(np, &prmd->pd);
+ if (error)
+ pm_genpd_remove(&prmd->pd);
+ else
+ prm->prmd = prmd;
+
+ return error;
+}
+
static bool _is_valid_reset(struct omap_reset_data *reset, unsigned long id)
{
if (reset->mask & BIT(id))
@@ -351,6 +604,7 @@ static int omap_prm_probe(struct platform_device *pdev)
const struct omap_prm_data *data;
struct omap_prm *prm;
const struct of_device_id *match;
+ int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
@@ -378,7 +632,21 @@ static int omap_prm_probe(struct platform_device *pdev)
if (IS_ERR(prm->base))
return PTR_ERR(prm->base);
- return omap_prm_reset_init(pdev, prm);
+ ret = omap_prm_domain_init(&pdev->dev, prm);
+ if (ret)
+ return ret;
+
+ ret = omap_prm_reset_init(pdev, prm);
+ if (ret)
+ goto err_domain;
+
+ return 0;
+
+err_domain:
+ of_genpd_del_provider(pdev->dev.of_node);
+ pm_genpd_remove(&prm->prmd->pd);
+
+ return ret;
}
static struct platform_driver omap_prm_driver = {
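
Aside: a condensed, hedged sketch of the genpd provider registration pattern the omap_prm changes follow. The my_* names are illustrative and the register programming is omitted; this is a summary of the shape of the code, not a drop-in implementation.

#include <linux/device.h>
#include <linux/of.h>
#include <linux/pm_domain.h>

struct my_domain {
	struct generic_pm_domain pd;
	/* register base, PWRSTCTRL/PWRSTST offsets, capabilities, ... */
};

static int my_domain_power_on(struct generic_pm_domain *pd)
{
	/* program PWRSTCTRL to ON, poll PWRSTST until the transition clears */
	return 0;
}

static int my_domain_power_off(struct generic_pm_domain *pd)
{
	/* request the lowest usable mode, poll PWRSTST for completion */
	return 0;
}

static int my_domain_register(struct device *dev, struct my_domain *d)
{
	int error;

	d->pd.name = "my_domain";
	d->pd.power_on = my_domain_power_on;
	d->pd.power_off = my_domain_power_off;

	/* is_off = true: the genpd core powers the domain up on first use */
	pm_genpd_init(&d->pd, NULL, true);

	error = of_genpd_add_provider_simple(dev->of_node, &d->pd);
	if (error)
		pm_genpd_remove(&d->pd);

	return error;
}
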
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index de0123ec8ad6..d2f5e7001a93 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/platform_data/pm33xx.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
@@ -39,6 +40,8 @@
#define GIC_INT_SET_PENDING_BASE 0x200
#define AM43XX_GIC_DIST_BASE 0x48241000
+static void __iomem *rtc_base_virt;
+static struct clk *rtc_fck;
static u32 rtc_magic_val;
static int (*am33xx_do_wfi_sram)(unsigned long unused);
@@ -90,7 +93,7 @@ static int am33xx_push_sram_idle(void)
ro_sram_data.amx3_pm_sram_data_virt = ocmcram_location_data;
ro_sram_data.amx3_pm_sram_data_phys =
gen_pool_virt_to_phys(sram_pool_data, ocmcram_location_data);
- ro_sram_data.rtc_base_virt = pm_ops->get_rtc_base_addr();
+ ro_sram_data.rtc_base_virt = rtc_base_virt;
/* Save physical address to calculate resume offset during pm init */
am33xx_do_wfi_sram_phys = gen_pool_virt_to_phys(sram_pool,
@@ -158,7 +161,7 @@ static struct wkup_m3_wakeup_src rtc_wake_src(void)
{
u32 i;
- i = __raw_readl(pm_ops->get_rtc_base_addr() + 0x44) & 0x40;
+ i = __raw_readl(rtc_base_virt + 0x44) & 0x40;
if (i) {
retrigger_irq = rtc_alarm_wakeup.irq_nr;
@@ -177,13 +180,24 @@ static int am33xx_rtc_only_idle(unsigned long wfi_flags)
return 0;
}
+/*
+ * Note that the RTC module clock must be re-enabled only for rtc+ddr suspend.
+ * And looks like the module can stay in SYSC_IDLE_SMART_WKUP mode configured
+ * by the interconnect code just fine for both rtc+ddr suspend and retention
+ * suspend.
+ */
static int am33xx_pm_suspend(suspend_state_t suspend_state)
{
int i, ret = 0;
if (suspend_state == PM_SUSPEND_MEM &&
pm_ops->check_off_mode_enable()) {
- pm_ops->prepare_rtc_suspend();
+ ret = clk_prepare_enable(rtc_fck);
+ if (ret) {
+ dev_err(pm33xx_dev, "Failed to enable clock: %i\n", ret);
+ return ret;
+ }
+
pm_ops->save_context();
suspend_wfi_flags |= WFI_FLAG_RTC_ONLY;
clk_save_context();
@@ -236,7 +250,7 @@ static int am33xx_pm_suspend(suspend_state_t suspend_state)
}
if (suspend_state == PM_SUSPEND_MEM && pm_ops->check_off_mode_enable())
- pm_ops->prepare_rtc_resume();
+ clk_disable_unprepare(rtc_fck);
return ret;
}
@@ -425,14 +439,28 @@ static int am33xx_pm_rtc_setup(void)
struct device_node *np;
unsigned long val = 0;
struct nvmem_device *nvmem;
+ int error;
np = of_find_node_by_name(NULL, "rtc");
if (of_device_is_available(np)) {
+ /* RTC interconnect target module clock */
+ rtc_fck = of_clk_get_by_name(np->parent, "fck");
+ if (IS_ERR(rtc_fck))
+ return PTR_ERR(rtc_fck);
+
+ rtc_base_virt = of_iomap(np, 0);
+ if (!rtc_base_virt) {
+ pr_warn("PM: could not iomap rtc");
+ error = -ENODEV;
+ goto err_clk_put;
+ }
+
omap_rtc = rtc_class_open("rtc0");
if (!omap_rtc) {
pr_warn("PM: rtc0 not available");
- return -EPROBE_DEFER;
+ error = -EPROBE_DEFER;
+ goto err_iounmap;
}
nvmem = devm_nvmem_device_get(&omap_rtc->dev,
@@ -454,6 +482,13 @@ static int am33xx_pm_rtc_setup(void)
}
return 0;
+
+err_iounmap:
+ iounmap(rtc_base_virt);
+err_clk_put:
+ clk_put(rtc_fck);
+
+ return error;
}
static int am33xx_pm_probe(struct platform_device *pdev)
@@ -544,6 +579,8 @@ static int am33xx_pm_remove(struct platform_device *pdev)
suspend_set_ops(NULL);
wkup_m3_ipc_put(m3_ipc);
am33xx_pm_free_sram();
+ iounmap(rtc_base_virt);
+ clk_put(rtc_fck);
return 0;
}
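
Aside: the pm33xx change resolves the RTC interconnect module "fck" once at setup, enables it only around the RTC+DDR (off-mode) suspend path, and releases it on teardown, replacing the old prepare_rtc_suspend()/prepare_rtc_resume() platform hooks. A hedged sketch of that clock lifecycle with illustrative my_* names:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>

static struct clk *my_rtc_fck;

/* resolve the RTC interconnect target module clock once at setup time */
static int my_rtc_clk_setup(struct device_node *rtc_np)
{
	my_rtc_fck = of_clk_get_by_name(rtc_np->parent, "fck");
	return PTR_ERR_OR_ZERO(my_rtc_fck);
}

/* enabled only around the rtc+ddr (off-mode) suspend path */
static int my_rtc_only_enter(void)
{
	return clk_prepare_enable(my_rtc_fck);
}

static void my_rtc_only_exit(void)
{
	clk_disable_unprepare(my_rtc_fck);
}

static void my_rtc_clk_teardown(void)
{
	clk_put(my_rtc_fck);
}
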
diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c
new file mode 100644
index 000000000000..cc0b4ad7a3d3
--- /dev/null
+++ b/drivers/soc/ti/pruss.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PRU-ICSS platform driver for various TI SoCs
+ *
+ * Copyright (C) 2014-2020 Texas Instruments Incorporated - http://www.ti.com/
+ * Author(s):
+ * Suman Anna <s-anna@ti.com>
+ * Andrew F. Davis <afd@ti.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/pruss_driver.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/**
+ * struct pruss_private_data - PRUSS driver private data
+ * @has_no_sharedram: flag to indicate the absence of PRUSS Shared Data RAM
+ * @has_core_mux_clock: flag to indicate the presence of PRUSS core clock
+ */
+struct pruss_private_data {
+ bool has_no_sharedram;
+ bool has_core_mux_clock;
+};
+
+static void pruss_of_free_clk_provider(void *data)
+{
+ struct device_node *clk_mux_np = data;
+
+ of_clk_del_provider(clk_mux_np);
+ of_node_put(clk_mux_np);
+}
+
+static int pruss_clk_mux_setup(struct pruss *pruss, struct clk *clk_mux,
+ char *mux_name, struct device_node *clks_np)
+{
+ struct device_node *clk_mux_np;
+ struct device *dev = pruss->dev;
+ char *clk_mux_name;
+ unsigned int num_parents;
+ const char **parent_names;
+ void __iomem *reg;
+ u32 reg_offset;
+ int ret;
+
+ clk_mux_np = of_get_child_by_name(clks_np, mux_name);
+ if (!clk_mux_np) {
+ dev_err(dev, "%pOF is missing its '%s' node\n", clks_np,
+ mux_name);
+ return -ENODEV;
+ }
+
+ num_parents = of_clk_get_parent_count(clk_mux_np);
+ if (num_parents < 1) {
+ dev_err(dev, "mux-clock %pOF must have parents\n", clk_mux_np);
+ ret = -EINVAL;
+ goto put_clk_mux_np;
+ }
+
+ parent_names = devm_kcalloc(dev, sizeof(*parent_names), num_parents,
+ GFP_KERNEL);
+ if (!parent_names) {
+ ret = -ENOMEM;
+ goto put_clk_mux_np;
+ }
+
+ of_clk_parent_fill(clk_mux_np, parent_names, num_parents);
+
+ clk_mux_name = devm_kasprintf(dev, GFP_KERNEL, "%s.%pOFn",
+ dev_name(dev), clk_mux_np);
+ if (!clk_mux_name) {
+ ret = -ENOMEM;
+ goto put_clk_mux_np;
+ }
+
+ ret = of_property_read_u32(clk_mux_np, "reg", &reg_offset);
+ if (ret)
+ goto put_clk_mux_np;
+
+ reg = pruss->cfg_base + reg_offset;
+
+ clk_mux = clk_register_mux(NULL, clk_mux_name, parent_names,
+ num_parents, 0, reg, 0, 1, 0, NULL);
+ if (IS_ERR(clk_mux)) {
+ ret = PTR_ERR(clk_mux);
+ goto put_clk_mux_np;
+ }
+
+ ret = devm_add_action_or_reset(dev, (void(*)(void *))clk_unregister_mux,
+ clk_mux);
+ if (ret) {
+ dev_err(dev, "failed to add clkmux unregister action %d", ret);
+ goto put_clk_mux_np;
+ }
+
+ ret = of_clk_add_provider(clk_mux_np, of_clk_src_simple_get, clk_mux);
+ if (ret)
+ goto put_clk_mux_np;
+
+ ret = devm_add_action_or_reset(dev, pruss_of_free_clk_provider,
+ clk_mux_np);
+ if (ret) {
+ dev_err(dev, "failed to add clkmux free action %d", ret);
+ goto put_clk_mux_np;
+ }
+
+ return 0;
+
+put_clk_mux_np:
+ of_node_put(clk_mux_np);
+ return ret;
+}
+
+static int pruss_clk_init(struct pruss *pruss, struct device_node *cfg_node)
+{
+ const struct pruss_private_data *data;
+ struct device_node *clks_np;
+ struct device *dev = pruss->dev;
+ int ret = 0;
+
+ data = of_device_get_match_data(dev);
+ if (IS_ERR(data))
+ return -ENODEV;
+
+ clks_np = of_get_child_by_name(cfg_node, "clocks");
+ if (!clks_np) {
+ dev_err(dev, "%pOF is missing its 'clocks' node\n", clks_np);
+ return -ENODEV;
+ }
+
+ if (data && data->has_core_mux_clock) {
+ ret = pruss_clk_mux_setup(pruss, pruss->core_clk_mux,
+ "coreclk-mux", clks_np);
+ if (ret) {
+ dev_err(dev, "failed to setup coreclk-mux\n");
+ goto put_clks_node;
+ }
+ }
+
+ ret = pruss_clk_mux_setup(pruss, pruss->iep_clk_mux, "iepclk-mux",
+ clks_np);
+ if (ret) {
+ dev_err(dev, "failed to setup iepclk-mux\n");
+ goto put_clks_node;
+ }
+
+put_clks_node:
+ of_node_put(clks_np);
+
+ return ret;
+}
+
+static struct regmap_config regmap_conf = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static int pruss_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev_of_node(dev);
+ struct device_node *child;
+ struct pruss *pruss;
+ struct resource res;
+ int ret, i, index;
+ const struct pruss_private_data *data;
+ const char *mem_names[PRUSS_MEM_MAX] = { "dram0", "dram1", "shrdram2" };
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (IS_ERR(data)) {
+ dev_err(dev, "missing private data\n");
+ return -ENODEV;
+ }
+
+ ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "failed to set the DMA coherent mask");
+ return ret;
+ }
+
+ pruss = devm_kzalloc(dev, sizeof(*pruss), GFP_KERNEL);
+ if (!pruss)
+ return -ENOMEM;
+
+ pruss->dev = dev;
+
+ child = of_get_child_by_name(np, "memories");
+ if (!child) {
+ dev_err(dev, "%pOF is missing its 'memories' node\n", child);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < PRUSS_MEM_MAX; i++) {
+ /*
+ * On AM437x, one of the two PRUSS units doesn't contain Shared RAM;
+ * skip it
+ */
+ if (data && data->has_no_sharedram && i == PRUSS_MEM_SHRD_RAM2)
+ continue;
+
+ index = of_property_match_string(child, "reg-names",
+ mem_names[i]);
+ if (index < 0) {
+ of_node_put(child);
+ return index;
+ }
+
+ if (of_address_to_resource(child, index, &res)) {
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ pruss->mem_regions[i].va = devm_ioremap(dev, res.start,
+ resource_size(&res));
+ if (!pruss->mem_regions[i].va) {
+ dev_err(dev, "failed to parse and map memory resource %d %s\n",
+ i, mem_names[i]);
+ of_node_put(child);
+ return -ENOMEM;
+ }
+ pruss->mem_regions[i].pa = res.start;
+ pruss->mem_regions[i].size = resource_size(&res);
+
+ dev_dbg(dev, "memory %8s: pa %pa size 0x%zx va %pK\n",
+ mem_names[i], &pruss->mem_regions[i].pa,
+ pruss->mem_regions[i].size, pruss->mem_regions[i].va);
+ }
+ of_node_put(child);
+
+ platform_set_drvdata(pdev, pruss);
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "couldn't enable module\n");
+ pm_runtime_put_noidle(dev);
+ goto rpm_disable;
+ }
+
+ child = of_get_child_by_name(np, "cfg");
+ if (!child) {
+ dev_err(dev, "%pOF is missing its 'cfg' node\n", child);
+ ret = -ENODEV;
+ goto rpm_put;
+ }
+
+ if (of_address_to_resource(child, 0, &res)) {
+ ret = -ENOMEM;
+ goto node_put;
+ }
+
+ pruss->cfg_base = devm_ioremap(dev, res.start, resource_size(&res));
+ if (!pruss->cfg_base) {
+ ret = -ENOMEM;
+ goto node_put;
+ }
+
+ regmap_conf.name = kasprintf(GFP_KERNEL, "%pOFn@%llx", child,
+ (u64)res.start);
+ regmap_conf.max_register = resource_size(&res) - 4;
+
+ pruss->cfg_regmap = devm_regmap_init_mmio(dev, pruss->cfg_base,
+ &regmap_conf);
+ kfree(regmap_conf.name);
+ if (IS_ERR(pruss->cfg_regmap)) {
+ dev_err(dev, "regmap_init_mmio failed for cfg, ret = %ld\n",
+ PTR_ERR(pruss->cfg_regmap));
+ ret = PTR_ERR(pruss->cfg_regmap);
+ goto node_put;
+ }
+
+ ret = pruss_clk_init(pruss, child);
+ if (ret) {
+ dev_err(dev, "failed to setup coreclk-mux\n");
+ goto node_put;
+ }
+
+ ret = devm_of_platform_populate(dev);
+ if (ret) {
+ dev_err(dev, "failed to register child devices\n");
+ goto node_put;
+ }
+
+ of_node_put(child);
+
+ return 0;
+
+node_put:
+ of_node_put(child);
+rpm_put:
+ pm_runtime_put_sync(dev);
+rpm_disable:
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static int pruss_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ devm_of_platform_depopulate(dev);
+
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+/* instance-specific driver private data */
+static const struct pruss_private_data am437x_pruss1_data = {
+ .has_no_sharedram = false,
+};
+
+static const struct pruss_private_data am437x_pruss0_data = {
+ .has_no_sharedram = true,
+};
+
+static const struct pruss_private_data am65x_j721e_pruss_data = {
+ .has_core_mux_clock = true,
+};
+
+static const struct of_device_id pruss_of_match[] = {
+ { .compatible = "ti,am3356-pruss" },
+ { .compatible = "ti,am4376-pruss0", .data = &am437x_pruss0_data, },
+ { .compatible = "ti,am4376-pruss1", .data = &am437x_pruss1_data, },
+ { .compatible = "ti,am5728-pruss" },
+ { .compatible = "ti,k2g-pruss" },
+ { .compatible = "ti,am654-icssg", .data = &am65x_j721e_pruss_data, },
+ { .compatible = "ti,j721e-icssg", .data = &am65x_j721e_pruss_data, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pruss_of_match);
+
+static struct platform_driver pruss_driver = {
+ .driver = {
+ .name = "pruss",
+ .of_match_table = pruss_of_match,
+ },
+ .probe = pruss_probe,
+ .remove = pruss_remove,
+};
+module_platform_driver(pruss_driver);
+
+MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
+MODULE_DESCRIPTION("PRU-ICSS Subsystem Driver");
+MODULE_LICENSE("GPL v2");
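
Aside: pruss_clk_mux_setup() above leans on devm_add_action_or_reset() so the registered mux clock and the OF clock provider are undone automatically on probe failure or driver unbind. A hedged sketch of that cleanup pattern with illustrative my_* names:

#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/of.h>

static void my_del_clk_provider(void *data)
{
	struct device_node *np = data;

	of_clk_del_provider(np);
	of_node_put(np);
}

static int my_register_mux_provider(struct device *dev,
				    struct device_node *np, struct clk *mux)
{
	int ret;

	ret = of_clk_add_provider(np, of_clk_src_simple_get, mux);
	if (ret)
		return ret;

	/* runs my_del_clk_provider(np) immediately on failure, or at unbind */
	return devm_add_action_or_reset(dev, my_del_clk_provider, np);
}
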
diff --git a/drivers/soc/ti/smartreflex.c b/drivers/soc/ti/smartreflex.c
new file mode 100644
index 000000000000..5376f3d22f31
--- /dev/null
+++ b/drivers/soc/ti/smartreflex.c
@@ -0,0 +1,1045 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OMAP SmartReflex Voltage Control
+ *
+ * Author: Thara Gopinath <thara@ti.com>
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ * Thara Gopinath <thara@ti.com>
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Kalle Jokiniemi
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ * Lesly A M <x0080970@ti.com>
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/power/smartreflex.h>
+
+#define DRIVER_NAME "smartreflex"
+#define SMARTREFLEX_NAME_LEN 32
+#define NVALUE_NAME_LEN 40
+#define SR_DISABLE_TIMEOUT 200
+
+/* sr_list contains all the instances of smartreflex module */
+static LIST_HEAD(sr_list);
+
+static struct omap_sr_class_data *sr_class;
+static struct dentry *sr_dbg_dir;
+
+static inline void sr_write_reg(struct omap_sr *sr, unsigned offset, u32 value)
+{
+ __raw_writel(value, (sr->base + offset));
+}
+
+static inline void sr_modify_reg(struct omap_sr *sr, unsigned offset, u32 mask,
+ u32 value)
+{
+ u32 reg_val;
+
+ /*
+ * The Smartreflex error config register is special as it contains
+ * certain status bits which are cleared when written with a 1. So in
+ * order to make sure no accidental write of 1 happens to those status
+ * bits, clear them in the read value. This means this API doesn't
+ * rewrite values in these bits if they are currently set, but does
+ * allow the caller to write those bits.
+ */
+ if (sr->ip_type == SR_TYPE_V1 && offset == ERRCONFIG_V1)
+ mask |= ERRCONFIG_STATUS_V1_MASK;
+ else if (sr->ip_type == SR_TYPE_V2 && offset == ERRCONFIG_V2)
+ mask |= ERRCONFIG_VPBOUNDINTST_V2;
+
+ reg_val = __raw_readl(sr->base + offset);
+ reg_val &= ~mask;
+
+ value &= mask;
+
+ reg_val |= value;
+
+ __raw_writel(reg_val, (sr->base + offset));
+}
+
+static inline u32 sr_read_reg(struct omap_sr *sr, unsigned offset)
+{
+ return __raw_readl(sr->base + offset);
+}
+
+static struct omap_sr *_sr_lookup(struct voltagedomain *voltdm)
+{
+ struct omap_sr *sr_info;
+
+ if (!voltdm) {
+ pr_err("%s: Null voltage domain passed!\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ list_for_each_entry(sr_info, &sr_list, node) {
+ if (voltdm == sr_info->voltdm)
+ return sr_info;
+ }
+
+ return ERR_PTR(-ENODATA);
+}
+
+static irqreturn_t sr_interrupt(int irq, void *data)
+{
+ struct omap_sr *sr_info = data;
+ u32 status = 0;
+
+ switch (sr_info->ip_type) {
+ case SR_TYPE_V1:
+ /* Read the status bits */
+ status = sr_read_reg(sr_info, ERRCONFIG_V1);
+
+ /* Clear them by writing back */
+ sr_write_reg(sr_info, ERRCONFIG_V1, status);
+ break;
+ case SR_TYPE_V2:
+ /* Read the status bits */
+ status = sr_read_reg(sr_info, IRQSTATUS);
+
+ /* Clear them by writing back */
+ sr_write_reg(sr_info, IRQSTATUS, status);
+ break;
+ default:
+ dev_err(&sr_info->pdev->dev, "UNKNOWN IP type %d\n",
+ sr_info->ip_type);
+ return IRQ_NONE;
+ }
+
+ if (sr_class->notify)
+ sr_class->notify(sr_info, status);
+
+ return IRQ_HANDLED;
+}
+
+static void sr_set_clk_length(struct omap_sr *sr)
+{
+ struct clk *fck;
+ u32 fclk_speed;
+
+ /* Try interconnect target module fck first if it already exists */
+ fck = clk_get(sr->pdev->dev.parent, "fck");
+ if (IS_ERR(fck)) {
+ fck = clk_get(&sr->pdev->dev, "fck");
+ if (IS_ERR(fck)) {
+ dev_err(&sr->pdev->dev,
+ "%s: unable to get fck for device %s\n",
+ __func__, dev_name(&sr->pdev->dev));
+ return;
+ }
+ }
+
+ fclk_speed = clk_get_rate(fck);
+ clk_put(fck);
+
+ switch (fclk_speed) {
+ case 12000000:
+ sr->clk_length = SRCLKLENGTH_12MHZ_SYSCLK;
+ break;
+ case 13000000:
+ sr->clk_length = SRCLKLENGTH_13MHZ_SYSCLK;
+ break;
+ case 19200000:
+ sr->clk_length = SRCLKLENGTH_19MHZ_SYSCLK;
+ break;
+ case 26000000:
+ sr->clk_length = SRCLKLENGTH_26MHZ_SYSCLK;
+ break;
+ case 38400000:
+ sr->clk_length = SRCLKLENGTH_38MHZ_SYSCLK;
+ break;
+ default:
+ dev_err(&sr->pdev->dev, "%s: Invalid fclk rate: %d\n",
+ __func__, fclk_speed);
+ break;
+ }
+}
+
+static void sr_start_vddautocomp(struct omap_sr *sr)
+{
+ if (!sr_class || !(sr_class->enable) || !(sr_class->configure)) {
+ dev_warn(&sr->pdev->dev,
+ "%s: smartreflex class driver not registered\n",
+ __func__);
+ return;
+ }
+
+ if (!sr_class->enable(sr))
+ sr->autocomp_active = true;
+}
+
+static void sr_stop_vddautocomp(struct omap_sr *sr)
+{
+ if (!sr_class || !(sr_class->disable)) {
+ dev_warn(&sr->pdev->dev,
+ "%s: smartreflex class driver not registered\n",
+ __func__);
+ return;
+ }
+
+ if (sr->autocomp_active) {
+ sr_class->disable(sr, 1);
+ sr->autocomp_active = false;
+ }
+}
+
+/*
+ * This function handles the initializations which have to be done
+ * only when both the sr device and class driver registration have
+ * completed. It is attempted from both the sr class driver register
+ * and the sr device initialization APIs; only one call will
+ * ultimately succeed.
+ *
+ * Currently this function registers an interrupt handler for a
+ * particular SR if the smartreflex class driver is already registered,
+ * has requested interrupts and the SR interrupt line is present.
+ */
+static int sr_late_init(struct omap_sr *sr_info)
+{
+ struct omap_sr_data *pdata = sr_info->pdev->dev.platform_data;
+ int ret = 0;
+
+ if (sr_class->notify && sr_class->notify_flags && sr_info->irq) {
+ ret = devm_request_irq(&sr_info->pdev->dev, sr_info->irq,
+ sr_interrupt, 0, sr_info->name, sr_info);
+ if (ret)
+ goto error;
+ disable_irq(sr_info->irq);
+ }
+
+ if (pdata && pdata->enable_on_init)
+ sr_start_vddautocomp(sr_info);
+
+ return ret;
+
+error:
+ list_del(&sr_info->node);
+ dev_err(&sr_info->pdev->dev, "%s: ERROR in registering interrupt handler. Smartreflex will not function as desired\n",
+ __func__);
+
+ return ret;
+}
+
+static void sr_v1_disable(struct omap_sr *sr)
+{
+ int timeout = 0;
+ int errconf_val = ERRCONFIG_MCUACCUMINTST | ERRCONFIG_MCUVALIDINTST |
+ ERRCONFIG_MCUBOUNDINTST;
+
+ /* Enable MCUDisableAcknowledge interrupt */
+ sr_modify_reg(sr, ERRCONFIG_V1,
+ ERRCONFIG_MCUDISACKINTEN, ERRCONFIG_MCUDISACKINTEN);
+
+ /* SRCONFIG - disable SR */
+ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, 0x0);
+
+ /* Disable all other SR interrupts and clear the status as needed */
+ if (sr_read_reg(sr, ERRCONFIG_V1) & ERRCONFIG_VPBOUNDINTST_V1)
+ errconf_val |= ERRCONFIG_VPBOUNDINTST_V1;
+ sr_modify_reg(sr, ERRCONFIG_V1,
+ (ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUVALIDINTEN |
+ ERRCONFIG_MCUBOUNDINTEN | ERRCONFIG_VPBOUNDINTEN_V1),
+ errconf_val);
+
+ /*
+ * Wait for SR to be disabled.
+ * wait until ERRCONFIG.MCUDISACKINTST = 1. Typical latency is 1us.
+ */
+ sr_test_cond_timeout((sr_read_reg(sr, ERRCONFIG_V1) &
+ ERRCONFIG_MCUDISACKINTST), SR_DISABLE_TIMEOUT,
+ timeout);
+
+ if (timeout >= SR_DISABLE_TIMEOUT)
+ dev_warn(&sr->pdev->dev, "%s: Smartreflex disable timedout\n",
+ __func__);
+
+ /* Disable MCUDisableAcknowledge interrupt & clear pending interrupt */
+ sr_modify_reg(sr, ERRCONFIG_V1, ERRCONFIG_MCUDISACKINTEN,
+ ERRCONFIG_MCUDISACKINTST);
+}
+
+static void sr_v2_disable(struct omap_sr *sr)
+{
+ int timeout = 0;
+
+ /* Enable MCUDisableAcknowledge interrupt */
+ sr_write_reg(sr, IRQENABLE_SET, IRQENABLE_MCUDISABLEACKINT);
+
+ /* SRCONFIG - disable SR */
+ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, 0x0);
+
+ /*
+ * Disable all other SR interrupts and clear the status
+ * write to status register ONLY on need basis - only if status
+ * is set.
+ */
+ if (sr_read_reg(sr, ERRCONFIG_V2) & ERRCONFIG_VPBOUNDINTST_V2)
+ sr_modify_reg(sr, ERRCONFIG_V2, ERRCONFIG_VPBOUNDINTEN_V2,
+ ERRCONFIG_VPBOUNDINTST_V2);
+ else
+ sr_modify_reg(sr, ERRCONFIG_V2, ERRCONFIG_VPBOUNDINTEN_V2,
+ 0x0);
+ sr_write_reg(sr, IRQENABLE_CLR, (IRQENABLE_MCUACCUMINT |
+ IRQENABLE_MCUVALIDINT |
+ IRQENABLE_MCUBOUNDSINT));
+ sr_write_reg(sr, IRQSTATUS, (IRQSTATUS_MCUACCUMINT |
+ IRQSTATUS_MCVALIDINT |
+ IRQSTATUS_MCBOUNDSINT));
+
+ /*
+ * Wait for SR to be disabled.
+ * wait until IRQSTATUS.MCUDISACKINTST = 1. Typical latency is 1us.
+ */
+ sr_test_cond_timeout((sr_read_reg(sr, IRQSTATUS) &
+ IRQSTATUS_MCUDISABLEACKINT), SR_DISABLE_TIMEOUT,
+ timeout);
+
+ if (timeout >= SR_DISABLE_TIMEOUT)
+ dev_warn(&sr->pdev->dev, "%s: Smartreflex disable timedout\n",
+ __func__);
+
+ /* Disable MCUDisableAcknowledge interrupt & clear pending interrupt */
+ sr_write_reg(sr, IRQENABLE_CLR, IRQENABLE_MCUDISABLEACKINT);
+ sr_write_reg(sr, IRQSTATUS, IRQSTATUS_MCUDISABLEACKINT);
+}
+
+static struct omap_sr_nvalue_table *sr_retrieve_nvalue_row(
+ struct omap_sr *sr, u32 efuse_offs)
+{
+ int i;
+
+ if (!sr->nvalue_table) {
+ dev_warn(&sr->pdev->dev, "%s: Missing ntarget value table\n",
+ __func__);
+ return NULL;
+ }
+
+ for (i = 0; i < sr->nvalue_count; i++) {
+ if (sr->nvalue_table[i].efuse_offs == efuse_offs)
+ return &sr->nvalue_table[i];
+ }
+
+ return NULL;
+}
+
+/* Public Functions */
+
+/**
+ * sr_configure_errgen() - Configures the SmartReflex to perform AVS using the
+ * error generator module.
+ * @sr: SR module to be configured.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * configure the error generator module inside the smartreflex module.
+ * SR settings if using the ERROR module inside Smartreflex.
+ * SR CLASS 3 by default uses only the ERROR module where as
+ * SR CLASS 2 can choose between ERROR module and MINMAXAVG
+ * module. Returns 0 on success and error value in case of failure.
+ */
+int sr_configure_errgen(struct omap_sr *sr)
+{
+ u32 sr_config, sr_errconfig, errconfig_offs;
+ u32 vpboundint_en, vpboundint_st;
+ u32 senp_en = 0, senn_en = 0;
+ u8 senp_shift, senn_shift;
+
+ if (!sr) {
+ pr_warn("%s: NULL omap_sr from %pS\n",
+ __func__, (void *)_RET_IP_);
+ return -EINVAL;
+ }
+
+ if (!sr->clk_length)
+ sr_set_clk_length(sr);
+
+ senp_en = sr->senp_mod;
+ senn_en = sr->senn_mod;
+
+ sr_config = (sr->clk_length << SRCONFIG_SRCLKLENGTH_SHIFT) |
+ SRCONFIG_SENENABLE | SRCONFIG_ERRGEN_EN;
+
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ sr_config |= SRCONFIG_DELAYCTRL;
+ senn_shift = SRCONFIG_SENNENABLE_V1_SHIFT;
+ senp_shift = SRCONFIG_SENPENABLE_V1_SHIFT;
+ errconfig_offs = ERRCONFIG_V1;
+ vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V1;
+ vpboundint_st = ERRCONFIG_VPBOUNDINTST_V1;
+ break;
+ case SR_TYPE_V2:
+ senn_shift = SRCONFIG_SENNENABLE_V2_SHIFT;
+ senp_shift = SRCONFIG_SENPENABLE_V2_SHIFT;
+ errconfig_offs = ERRCONFIG_V2;
+ vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V2;
+ vpboundint_st = ERRCONFIG_VPBOUNDINTST_V2;
+ break;
+ default:
+ dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex module without specifying the ip\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ sr_config |= ((senn_en << senn_shift) | (senp_en << senp_shift));
+ sr_write_reg(sr, SRCONFIG, sr_config);
+ sr_errconfig = (sr->err_weight << ERRCONFIG_ERRWEIGHT_SHIFT) |
+ (sr->err_maxlimit << ERRCONFIG_ERRMAXLIMIT_SHIFT) |
+ (sr->err_minlimit << ERRCONFIG_ERRMINLIMIT_SHIFT);
+ sr_modify_reg(sr, errconfig_offs, (SR_ERRWEIGHT_MASK |
+ SR_ERRMAXLIMIT_MASK | SR_ERRMINLIMIT_MASK),
+ sr_errconfig);
+
+ /* Enabling the interrupts if the ERROR module is used */
+ sr_modify_reg(sr, errconfig_offs, (vpboundint_en | vpboundint_st),
+ vpboundint_en);
+
+ return 0;
+}
+
+/**
+ * sr_disable_errgen() - Disables SmartReflex AVS module's errgen component
+ * @sr: SR module to be configured.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * disable the error generator module inside the smartreflex module.
+ *
+ * Returns 0 on success and error value in case of failure.
+ */
+int sr_disable_errgen(struct omap_sr *sr)
+{
+ u32 errconfig_offs;
+ u32 vpboundint_en, vpboundint_st;
+
+ if (!sr) {
+ pr_warn("%s: NULL omap_sr from %pS\n",
+ __func__, (void *)_RET_IP_);
+ return -EINVAL;
+ }
+
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ errconfig_offs = ERRCONFIG_V1;
+ vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V1;
+ vpboundint_st = ERRCONFIG_VPBOUNDINTST_V1;
+ break;
+ case SR_TYPE_V2:
+ errconfig_offs = ERRCONFIG_V2;
+ vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V2;
+ vpboundint_st = ERRCONFIG_VPBOUNDINTST_V2;
+ break;
+ default:
+ dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex module without specifying the ip\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Disable the Sensor and errorgen */
+ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SENENABLE | SRCONFIG_ERRGEN_EN, 0);
+
+ /*
+ * Disable the interrupts of ERROR module
+ * NOTE: modify is a read-modify-write; the required implicit OCP
+ * barrier is present here - sequencing is critical
+ * at this point (after errgen is disabled, vpboundint disable)
+ */
+ sr_modify_reg(sr, errconfig_offs, vpboundint_en | vpboundint_st, 0);
+
+ return 0;
+}
+
+/**
+ * sr_configure_minmax() - Configures the SmartReflex to perform AVS using the
+ * minmaxavg module.
+ * @sr: SR module to be configured.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * configure the minmaxavg module inside the smartreflex module.
+ * These are the SR settings used when the MINMAXAVG module inside
+ * Smartreflex is in use. SR CLASS 3 by default uses only the ERROR
+ * module, whereas SR CLASS 2 can choose between the ERROR module and
+ * the MINMAXAVG module. Returns 0 on success and an error value in
+ * case of failure.
+ */
+int sr_configure_minmax(struct omap_sr *sr)
+{
+ u32 sr_config, sr_avgwt;
+ u32 senp_en = 0, senn_en = 0;
+ u8 senp_shift, senn_shift;
+
+ if (!sr) {
+ pr_warn("%s: NULL omap_sr from %pS\n",
+ __func__, (void *)_RET_IP_);
+ return -EINVAL;
+ }
+
+ if (!sr->clk_length)
+ sr_set_clk_length(sr);
+
+ senp_en = sr->senp_mod;
+ senn_en = sr->senn_mod;
+
+ sr_config = (sr->clk_length << SRCONFIG_SRCLKLENGTH_SHIFT) |
+ SRCONFIG_SENENABLE |
+ (sr->accum_data << SRCONFIG_ACCUMDATA_SHIFT);
+
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ sr_config |= SRCONFIG_DELAYCTRL;
+ senn_shift = SRCONFIG_SENNENABLE_V1_SHIFT;
+ senp_shift = SRCONFIG_SENPENABLE_V1_SHIFT;
+ break;
+ case SR_TYPE_V2:
+ senn_shift = SRCONFIG_SENNENABLE_V2_SHIFT;
+ senp_shift = SRCONFIG_SENPENABLE_V2_SHIFT;
+ break;
+ default:
+ dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex module without specifying the ip\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ sr_config |= ((senn_en << senn_shift) | (senp_en << senp_shift));
+ sr_write_reg(sr, SRCONFIG, sr_config);
+ sr_avgwt = (sr->senp_avgweight << AVGWEIGHT_SENPAVGWEIGHT_SHIFT) |
+ (sr->senn_avgweight << AVGWEIGHT_SENNAVGWEIGHT_SHIFT);
+ sr_write_reg(sr, AVGWEIGHT, sr_avgwt);
+
+ /*
+ * Enabling the interrupts if MINMAXAVG module is used.
+ * TODO: check if all the interrupts are mandatory
+ */
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ sr_modify_reg(sr, ERRCONFIG_V1,
+ (ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUVALIDINTEN |
+ ERRCONFIG_MCUBOUNDINTEN),
+ (ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUACCUMINTST |
+ ERRCONFIG_MCUVALIDINTEN | ERRCONFIG_MCUVALIDINTST |
+ ERRCONFIG_MCUBOUNDINTEN | ERRCONFIG_MCUBOUNDINTST));
+ break;
+ case SR_TYPE_V2:
+ sr_write_reg(sr, IRQSTATUS,
+ IRQSTATUS_MCUACCUMINT | IRQSTATUS_MCVALIDINT |
+ IRQSTATUS_MCBOUNDSINT | IRQSTATUS_MCUDISABLEACKINT);
+ sr_write_reg(sr, IRQENABLE_SET,
+ IRQENABLE_MCUACCUMINT | IRQENABLE_MCUVALIDINT |
+ IRQENABLE_MCUBOUNDSINT | IRQENABLE_MCUDISABLEACKINT);
+ break;
+ default:
+ dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex module without specifying the ip\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * sr_enable() - Enables the smartreflex module.
+ * @sr: pointer to which the SR module to be configured belongs to.
+ * @volt: The voltage at which the voltage domain associated with
+ * the smartreflex module is operating.
+ * This is required only to program the correct Ntarget value.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * enable a smartreflex module. Returns 0 on success. Returns error
+ * value if the voltage passed is wrong or if ntarget value is wrong.
+ */
+int sr_enable(struct omap_sr *sr, unsigned long volt)
+{
+ struct omap_volt_data *volt_data;
+ struct omap_sr_nvalue_table *nvalue_row;
+ int ret;
+
+ if (!sr) {
+ pr_warn("%s: NULL omap_sr from %pS\n",
+ __func__, (void *)_RET_IP_);
+ return -EINVAL;
+ }
+
+ volt_data = omap_voltage_get_voltdata(sr->voltdm, volt);
+
+ if (IS_ERR(volt_data)) {
+ dev_warn(&sr->pdev->dev, "%s: Unable to get voltage table for nominal voltage %ld\n",
+ __func__, volt);
+ return PTR_ERR(volt_data);
+ }
+
+ nvalue_row = sr_retrieve_nvalue_row(sr, volt_data->sr_efuse_offs);
+
+ if (!nvalue_row) {
+ dev_warn(&sr->pdev->dev, "%s: failure getting SR data for this voltage %ld\n",
+ __func__, volt);
+ return -ENODATA;
+ }
+
+ /* errminlimit is opp dependent and hence linked to voltage */
+ sr->err_minlimit = nvalue_row->errminlimit;
+
+ pm_runtime_get_sync(&sr->pdev->dev);
+
+ /* Check if SR is already enabled. If yes do nothing */
+ if (sr_read_reg(sr, SRCONFIG) & SRCONFIG_SRENABLE)
+ return 0;
+
+ /* Configure SR */
+ ret = sr_class->configure(sr);
+ if (ret)
+ return ret;
+
+ sr_write_reg(sr, NVALUERECIPROCAL, nvalue_row->nvalue);
+
+ /* SRCONFIG - enable SR */
+ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, SRCONFIG_SRENABLE);
+ return 0;
+}
+
+/**
+ * sr_disable() - Disables the smartreflex module.
+ * @sr: pointer to which the SR module to be configured belongs to.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * disable a smartreflex module.
+ */
+void sr_disable(struct omap_sr *sr)
+{
+ if (!sr) {
+ pr_warn("%s: NULL omap_sr from %pS\n",
+ __func__, (void *)_RET_IP_);
+ return;
+ }
+
+ /* Check if SR clocks are already disabled. If yes do nothing */
+ if (pm_runtime_suspended(&sr->pdev->dev))
+ return;
+
+ /*
+ * Disable SR only if it is indeed enabled. Else just
+ * disable the clocks.
+ */
+ if (sr_read_reg(sr, SRCONFIG) & SRCONFIG_SRENABLE) {
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ sr_v1_disable(sr);
+ break;
+ case SR_TYPE_V2:
+ sr_v2_disable(sr);
+ break;
+ default:
+ dev_err(&sr->pdev->dev, "UNKNOWN IP type %d\n",
+ sr->ip_type);
+ }
+ }
+
+ pm_runtime_put_sync_suspend(&sr->pdev->dev);
+}
+
+/**
+ * sr_register_class() - API to register a smartreflex class parameters.
+ * @class_data: The structure containing various sr class specific data.
+ *
+ * This API is to be called by the smartreflex class driver to register itself
+ * with the smartreflex driver during init. Returns 0 on success else the
+ * error value.
+ */
+int sr_register_class(struct omap_sr_class_data *class_data)
+{
+ struct omap_sr *sr_info;
+
+ if (!class_data) {
+ pr_warn("%s:, Smartreflex class data passed is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (sr_class) {
+ pr_warn("%s: Smartreflex class driver already registered\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ sr_class = class_data;
+
+ /*
+ * Call into late init to do initializations that require
+ * both sr driver and sr class driver to be initialized.
+ */
+ list_for_each_entry(sr_info, &sr_list, node)
+ sr_late_init(sr_info);
+
+ return 0;
+}
+
+/**
+ * omap_sr_enable() - API to enable SR clocks and to call into the
+ * registered smartreflex class enable API.
+ * @voltdm: VDD pointer to which the SR module to be configured belongs to.
+ *
+ * This API is to be called from the kernel in order to enable
+ * a particular smartreflex module. This API will do the initial
+ * configurations to turn on the smartreflex module and in turn call
+ * into the registered smartreflex class enable API.
+ */
+void omap_sr_enable(struct voltagedomain *voltdm)
+{
+ struct omap_sr *sr = _sr_lookup(voltdm);
+
+ if (IS_ERR(sr)) {
+ pr_warn("%s: omap_sr struct for voltdm not found\n", __func__);
+ return;
+ }
+
+ if (!sr->autocomp_active)
+ return;
+
+ if (!sr_class || !(sr_class->enable) || !(sr_class->configure)) {
+ dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not registered\n",
+ __func__);
+ return;
+ }
+
+ sr_class->enable(sr);
+}
+
+/**
+ * omap_sr_disable() - API to disable SR without resetting the voltage
+ * processor voltage
+ * @voltdm: VDD pointer to which the SR module to be configured belongs to.
+ *
+ * This API is to be called from the kernel in order to disable
+ * a particular smartreflex module. This API will in turn call
+ * into the registered smartreflex class disable API. This API will tell
+ * the smartreflex class disable not to reset the VP voltage after
+ * disabling smartreflex.
+ */
+void omap_sr_disable(struct voltagedomain *voltdm)
+{
+ struct omap_sr *sr = _sr_lookup(voltdm);
+
+ if (IS_ERR(sr)) {
+ pr_warn("%s: omap_sr struct for voltdm not found\n", __func__);
+ return;
+ }
+
+ if (!sr->autocomp_active)
+ return;
+
+ if (!sr_class || !(sr_class->disable)) {
+ dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not registered\n",
+ __func__);
+ return;
+ }
+
+ sr_class->disable(sr, 0);
+}
+
+/**
+ * omap_sr_disable_reset_volt() - API to disable SR and reset the
+ * voltage processor voltage
+ * @voltdm: VDD pointer to which the SR module to be configured belongs to.
+ *
+ * This API is to be called from the kernel in order to disable
+ * a particular smartreflex module. This API will in turn call
+ * into the registered smartreflex class disable API. This API will tell
+ * the smartreflex class disable to reset the VP voltage after
+ * disabling smartreflex.
+ */
+void omap_sr_disable_reset_volt(struct voltagedomain *voltdm)
+{
+ struct omap_sr *sr = _sr_lookup(voltdm);
+
+ if (IS_ERR(sr)) {
+ pr_warn("%s: omap_sr struct for voltdm not found\n", __func__);
+ return;
+ }
+
+ if (!sr->autocomp_active)
+ return;
+
+ if (!sr_class || !(sr_class->disable)) {
+ dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not registered\n",
+ __func__);
+ return;
+ }
+
+ sr_class->disable(sr, 1);
+}
+
+/* PM Debug FS entries to enable and disable smartreflex. */
+static int omap_sr_autocomp_show(void *data, u64 *val)
+{
+ struct omap_sr *sr_info = data;
+
+ if (!sr_info) {
+ pr_warn("%s: omap_sr struct not found\n", __func__);
+ return -EINVAL;
+ }
+
+ *val = sr_info->autocomp_active;
+
+ return 0;
+}
+
+static int omap_sr_autocomp_store(void *data, u64 val)
+{
+ struct omap_sr *sr_info = data;
+
+ if (!sr_info) {
+ pr_warn("%s: omap_sr struct not found\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Sanity check */
+ if (val > 1) {
+ pr_warn("%s: Invalid argument %lld\n", __func__, val);
+ return -EINVAL;
+ }
+
+ /* control enable/disable only if there is a delta in value */
+ if (sr_info->autocomp_active != val) {
+ if (!val)
+ sr_stop_vddautocomp(sr_info);
+ else
+ sr_start_vddautocomp(sr_info);
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pm_sr_fops, omap_sr_autocomp_show,
+ omap_sr_autocomp_store, "%llu\n");
+
+static int omap_sr_probe(struct platform_device *pdev)
+{
+ struct omap_sr *sr_info;
+ struct omap_sr_data *pdata = pdev->dev.platform_data;
+ struct resource *mem, *irq;
+ struct dentry *nvalue_dir;
+ int i, ret = 0;
+
+ sr_info = devm_kzalloc(&pdev->dev, sizeof(struct omap_sr), GFP_KERNEL);
+ if (!sr_info)
+ return -ENOMEM;
+
+ sr_info->name = devm_kzalloc(&pdev->dev,
+ SMARTREFLEX_NAME_LEN, GFP_KERNEL);
+ if (!sr_info->name)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, sr_info);
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
+ return -EINVAL;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sr_info->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(sr_info->base)) {
+ dev_err(&pdev->dev, "%s: ioremap fail\n", __func__);
+ return PTR_ERR(sr_info->base);
+ }
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_irq_safe(&pdev->dev);
+
+ snprintf(sr_info->name, SMARTREFLEX_NAME_LEN, "%s", pdata->name);
+
+ sr_info->pdev = pdev;
+ sr_info->srid = pdev->id;
+ sr_info->voltdm = pdata->voltdm;
+ sr_info->nvalue_table = pdata->nvalue_table;
+ sr_info->nvalue_count = pdata->nvalue_count;
+ sr_info->senn_mod = pdata->senn_mod;
+ sr_info->senp_mod = pdata->senp_mod;
+ sr_info->err_weight = pdata->err_weight;
+ sr_info->err_maxlimit = pdata->err_maxlimit;
+ sr_info->accum_data = pdata->accum_data;
+ sr_info->senn_avgweight = pdata->senn_avgweight;
+ sr_info->senp_avgweight = pdata->senp_avgweight;
+ sr_info->autocomp_active = false;
+ sr_info->ip_type = pdata->ip_type;
+
+ if (irq)
+ sr_info->irq = irq->start;
+
+ sr_set_clk_length(sr_info);
+
+ list_add(&sr_info->node, &sr_list);
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ goto err_list_del;
+ }
+
+ /*
+ * Call into late init to do initializations that require
+ * both sr driver and sr class driver to be initialized.
+ */
+ if (sr_class) {
+ ret = sr_late_init(sr_info);
+ if (ret) {
+ pr_warn("%s: Error in SR late init\n", __func__);
+ goto err_list_del;
+ }
+ }
+
+ dev_info(&pdev->dev, "%s: SmartReflex driver initialized\n", __func__);
+ if (!sr_dbg_dir)
+ sr_dbg_dir = debugfs_create_dir("smartreflex", NULL);
+
+ sr_info->dbg_dir = debugfs_create_dir(sr_info->name, sr_dbg_dir);
+
+ debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, sr_info->dbg_dir,
+ sr_info, &pm_sr_fops);
+ debugfs_create_x32("errweight", S_IRUGO, sr_info->dbg_dir,
+ &sr_info->err_weight);
+ debugfs_create_x32("errmaxlimit", S_IRUGO, sr_info->dbg_dir,
+ &sr_info->err_maxlimit);
+
+ nvalue_dir = debugfs_create_dir("nvalue", sr_info->dbg_dir);
+
+ if (sr_info->nvalue_count == 0 || !sr_info->nvalue_table) {
+ dev_warn(&pdev->dev, "%s: %s: No Voltage table for the corresponding vdd. Cannot create debugfs entries for n-values\n",
+ __func__, sr_info->name);
+
+ ret = -ENODATA;
+ goto err_debugfs;
+ }
+
+ for (i = 0; i < sr_info->nvalue_count; i++) {
+ char name[NVALUE_NAME_LEN + 1];
+
+ snprintf(name, sizeof(name), "volt_%lu",
+ sr_info->nvalue_table[i].volt_nominal);
+ debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
+ &(sr_info->nvalue_table[i].nvalue));
+ snprintf(name, sizeof(name), "errminlimit_%lu",
+ sr_info->nvalue_table[i].volt_nominal);
+ debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
+ &(sr_info->nvalue_table[i].errminlimit));
+
+ }
+
+ pm_runtime_put_sync(&pdev->dev);
+
+ return ret;
+
+err_debugfs:
+ debugfs_remove_recursive(sr_info->dbg_dir);
+err_list_del:
+ list_del(&sr_info->node);
+
+ pm_runtime_put_sync(&pdev->dev);
+
+ return ret;
+}
+
+static int omap_sr_remove(struct platform_device *pdev)
+{
+ struct omap_sr_data *pdata = pdev->dev.platform_data;
+ struct omap_sr *sr_info;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
+ return -EINVAL;
+ }
+
+ sr_info = _sr_lookup(pdata->voltdm);
+ if (IS_ERR(sr_info)) {
+ dev_warn(&pdev->dev, "%s: omap_sr struct not found\n",
+ __func__);
+ return PTR_ERR(sr_info);
+ }
+
+ if (sr_info->autocomp_active)
+ sr_stop_vddautocomp(sr_info);
+ debugfs_remove_recursive(sr_info->dbg_dir);
+
+ pm_runtime_disable(&pdev->dev);
+ list_del(&sr_info->node);
+ return 0;
+}
+
+static void omap_sr_shutdown(struct platform_device *pdev)
+{
+ struct omap_sr_data *pdata = pdev->dev.platform_data;
+ struct omap_sr *sr_info;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
+ return;
+ }
+
+ sr_info = _sr_lookup(pdata->voltdm);
+ if (IS_ERR(sr_info)) {
+ dev_warn(&pdev->dev, "%s: omap_sr struct not found\n",
+ __func__);
+ return;
+ }
+
+ if (sr_info->autocomp_active)
+ sr_stop_vddautocomp(sr_info);
+}
+
+static const struct of_device_id omap_sr_match[] = {
+ { .compatible = "ti,omap3-smartreflex-core", },
+ { .compatible = "ti,omap3-smartreflex-mpu-iva", },
+ { .compatible = "ti,omap4-smartreflex-core", },
+ { .compatible = "ti,omap4-smartreflex-mpu", },
+ { .compatible = "ti,omap4-smartreflex-iva", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, omap_sr_match);
+
+static struct platform_driver smartreflex_driver = {
+ .probe = omap_sr_probe,
+ .remove = omap_sr_remove,
+ .shutdown = omap_sr_shutdown,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = omap_sr_match,
+ },
+};
+
+static int __init sr_init(void)
+{
+ int ret = 0;
+
+ ret = platform_driver_register(&smartreflex_driver);
+ if (ret) {
+ pr_err("%s: platform driver register failed for SR\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+late_initcall(sr_init);
+
+static void __exit sr_exit(void)
+{
+ platform_driver_unregister(&smartreflex_driver);
+}
+module_exit(sr_exit);
+
+MODULE_DESCRIPTION("OMAP SmartReflex Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_AUTHOR("Texas Instruments Inc");
diff --git a/drivers/soc/ti/ti_sci_pm_domains.c b/drivers/soc/ti/ti_sci_pm_domains.c
index 8c2a2f23982c..af2126d2b2ff 100644
--- a/drivers/soc/ti/ti_sci_pm_domains.c
+++ b/drivers/soc/ti/ti_sci_pm_domains.c
@@ -9,7 +9,6 @@
#include <linux/err.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
@@ -18,150 +17,95 @@
#include <dt-bindings/soc/ti,sci_pm_domain.h>
/**
- * struct ti_sci_genpd_dev_data: holds data needed for every device attached
- * to this genpd
- * @idx: index of the device that identifies it with the system
- * control processor.
- * @exclusive: Permissions for exclusive request or shared request of the
- * device.
+ * struct ti_sci_genpd_provider: holds common TI SCI genpd provider data
+ * @ti_sci: handle to TI SCI protocol driver that provides ops to
+ * communicate with system control processor.
+ * @dev: pointer to dev for the driver for devm allocs
+ * @pd_list: list of all the power domains on the device
+ * @data: onecell data for genpd core
*/
-struct ti_sci_genpd_dev_data {
- int idx;
- u8 exclusive;
+struct ti_sci_genpd_provider {
+ const struct ti_sci_handle *ti_sci;
+ struct device *dev;
+ struct list_head pd_list;
+ struct genpd_onecell_data data;
};
/**
* struct ti_sci_pm_domain: TI specific data needed for power domain
- * @ti_sci: handle to TI SCI protocol driver that provides ops to
- * communicate with system control processor.
- * @dev: pointer to dev for the driver for devm allocs
+ * @idx: index of the device that identifies it with the system
+ * control processor.
+ * @exclusive: Permissions for exclusive request or shared request of the
+ * device.
* @pd: generic_pm_domain for use with the genpd framework
+ * @node: link for the genpd list
+ * @parent: link to the parent TI SCI genpd provider
*/
struct ti_sci_pm_domain {
- const struct ti_sci_handle *ti_sci;
- struct device *dev;
+ int idx;
+ u8 exclusive;
struct generic_pm_domain pd;
+ struct list_head node;
+ struct ti_sci_genpd_provider *parent;
};
#define genpd_to_ti_sci_pd(gpd) container_of(gpd, struct ti_sci_pm_domain, pd)
-/**
- * ti_sci_dev_id(): get prepopulated ti_sci id from struct dev
- * @dev: pointer to device associated with this genpd
- *
- * Returns device_id stored from ti,sci_id property
- */
-static int ti_sci_dev_id(struct device *dev)
-{
- struct generic_pm_domain_data *genpd_data = dev_gpd_data(dev);
- struct ti_sci_genpd_dev_data *sci_dev_data = genpd_data->data;
-
- return sci_dev_data->idx;
-}
-
-static u8 is_ti_sci_dev_exclusive(struct device *dev)
-{
- struct generic_pm_domain_data *genpd_data = dev_gpd_data(dev);
- struct ti_sci_genpd_dev_data *sci_dev_data = genpd_data->data;
-
- return sci_dev_data->exclusive;
-}
-
-/**
- * ti_sci_dev_to_sci_handle(): get pointer to ti_sci_handle
- * @dev: pointer to device associated with this genpd
- *
- * Returns ti_sci_handle to be used to communicate with system
- * control processor.
+/*
+ * ti_sci_pd_power_off(): genpd power down hook
+ * @domain: pointer to the powerdomain to power off
*/
-static const struct ti_sci_handle *ti_sci_dev_to_sci_handle(struct device *dev)
+static int ti_sci_pd_power_off(struct generic_pm_domain *domain)
{
- struct generic_pm_domain *pd = pd_to_genpd(dev->pm_domain);
- struct ti_sci_pm_domain *ti_sci_genpd = genpd_to_ti_sci_pd(pd);
+ struct ti_sci_pm_domain *pd = genpd_to_ti_sci_pd(domain);
+ const struct ti_sci_handle *ti_sci = pd->parent->ti_sci;
- return ti_sci_genpd->ti_sci;
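+ /* Release the device back to the system controller firmware. */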
+ return ti_sci->ops.dev_ops.put_device(ti_sci, pd->idx);
}
-/**
- * ti_sci_dev_start(): genpd device start hook called to turn device on
- * @dev: pointer to device associated with this genpd to be powered on
+/*
+ * ti_sci_pd_power_on(): genpd power up hook
+ * @domain: pointer to the powerdomain to power on
*/
-static int ti_sci_dev_start(struct device *dev)
+static int ti_sci_pd_power_on(struct generic_pm_domain *domain)
{
- const struct ti_sci_handle *ti_sci = ti_sci_dev_to_sci_handle(dev);
- int idx = ti_sci_dev_id(dev);
+ struct ti_sci_pm_domain *pd = genpd_to_ti_sci_pd(domain);
+ const struct ti_sci_handle *ti_sci = pd->parent->ti_sci;
- if (is_ti_sci_dev_exclusive(dev))
- return ti_sci->ops.dev_ops.get_device_exclusive(ti_sci, idx);
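+ /* Request the device from the system controller firmware. */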
+ if (pd->exclusive)
+ return ti_sci->ops.dev_ops.get_device_exclusive(ti_sci,
+ pd->idx);
else
- return ti_sci->ops.dev_ops.get_device(ti_sci, idx);
+ return ti_sci->ops.dev_ops.get_device(ti_sci, pd->idx);
}
-/**
- * ti_sci_dev_stop(): genpd device stop hook called to turn device off
- * @dev: pointer to device associated with this genpd to be powered off
+/*
+ * ti_sci_pd_xlate(): translation service for TI SCI genpds
+ * @genpdspec: DT identification data for the genpd
+ * @data: genpd core data for all the powerdomains on the device
*/
-static int ti_sci_dev_stop(struct device *dev)
+static struct generic_pm_domain *ti_sci_pd_xlate(
+ struct of_phandle_args *genpdspec,
+ void *data)
{
- const struct ti_sci_handle *ti_sci = ti_sci_dev_to_sci_handle(dev);
- int idx = ti_sci_dev_id(dev);
+ struct genpd_onecell_data *genpd_data = data;
+ unsigned int idx = genpdspec->args[0];
- return ti_sci->ops.dev_ops.put_device(ti_sci, idx);
-}
+ if (genpdspec->args_count < 2)
+ return ERR_PTR(-EINVAL);
-static int ti_sci_pd_attach_dev(struct generic_pm_domain *domain,
- struct device *dev)
-{
- struct device_node *np = dev->of_node;
- struct of_phandle_args pd_args;
- struct ti_sci_pm_domain *ti_sci_genpd = genpd_to_ti_sci_pd(domain);
- const struct ti_sci_handle *ti_sci = ti_sci_genpd->ti_sci;
- struct ti_sci_genpd_dev_data *sci_dev_data;
- struct generic_pm_domain_data *genpd_data;
- int idx, ret = 0;
-
- ret = of_parse_phandle_with_args(np, "power-domains",
- "#power-domain-cells", 0, &pd_args);
- if (ret < 0)
- return ret;
-
- if (pd_args.args_count != 1 && pd_args.args_count != 2)
- return -EINVAL;
-
- idx = pd_args.args[0];
-
- /*
- * Check the validity of the requested idx, if the index is not valid
- * the PMMC will return a NAK here and we will not allocate it.
- */
- ret = ti_sci->ops.dev_ops.is_valid(ti_sci, idx);
- if (ret)
- return -EINVAL;
-
- sci_dev_data = kzalloc(sizeof(*sci_dev_data), GFP_KERNEL);
- if (!sci_dev_data)
- return -ENOMEM;
+ if (idx >= genpd_data->num_domains) {
+ pr_err("%s: invalid domain index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
- sci_dev_data->idx = idx;
- /* Enable the exclusive permissions by default */
- sci_dev_data->exclusive = TI_SCI_PD_EXCLUSIVE;
- if (pd_args.args_count == 2)
- sci_dev_data->exclusive = pd_args.args[1] & 0x1;
+ if (!genpd_data->domains[idx])
+ return ERR_PTR(-ENOENT);
- genpd_data = dev_gpd_data(dev);
- genpd_data->data = sci_dev_data;
+ genpd_to_ti_sci_pd(genpd_data->domains[idx])->exclusive =
+ genpdspec->args[1];
- return 0;
-}
-
-static void ti_sci_pd_detach_dev(struct generic_pm_domain *domain,
- struct device *dev)
-{
- struct generic_pm_domain_data *genpd_data = dev_gpd_data(dev);
- struct ti_sci_genpd_dev_data *sci_dev_data = genpd_data->data;
-
- kfree(sci_dev_data);
- genpd_data->data = NULL;
+ return genpd_data->domains[idx];
}
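+
+/*
+ * Consumers select a domain with a two-cell specifier handled by
+ * ti_sci_pd_xlate(), e.g. (the phandle label and device ID below are
+ * illustrative only):
+ *
+ * power-domains = <&pds 42 TI_SCI_PD_EXCLUSIVE>;
+ *
+ * Cell 0 is the TI SCI device ID, cell 1 selects TI_SCI_PD_EXCLUSIVE
+ * or TI_SCI_PD_SHARED access.
+ */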
static const struct of_device_id ti_sci_pm_domain_matches[] = {
@@ -173,33 +117,80 @@ MODULE_DEVICE_TABLE(of, ti_sci_pm_domain_matches);
static int ti_sci_pm_domain_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
- struct ti_sci_pm_domain *ti_sci_pd;
+ struct ti_sci_genpd_provider *pd_provider;
+ struct ti_sci_pm_domain *pd;
+ struct device_node *np = NULL;
+ struct of_phandle_args args;
int ret;
+ u32 max_id = 0;
+ int index;
- ti_sci_pd = devm_kzalloc(dev, sizeof(*ti_sci_pd), GFP_KERNEL);
- if (!ti_sci_pd)
+ pd_provider = devm_kzalloc(dev, sizeof(*pd_provider), GFP_KERNEL);
+ if (!pd_provider)
return -ENOMEM;
- ti_sci_pd->ti_sci = devm_ti_sci_get_handle(dev);
- if (IS_ERR(ti_sci_pd->ti_sci))
- return PTR_ERR(ti_sci_pd->ti_sci);
+ pd_provider->ti_sci = devm_ti_sci_get_handle(dev);
+ if (IS_ERR(pd_provider->ti_sci))
+ return PTR_ERR(pd_provider->ti_sci);
+
+ pd_provider->dev = dev;
+
+ INIT_LIST_HEAD(&pd_provider->pd_list);
+
+ /*
+ * Walk all "power-domains" consumers: register a genpd for every
+ * device ID that references this provider and track the highest ID
+ * used so the onecell domain table can be sized.
+ */
+ while (1) {
+ np = of_find_node_with_property(np, "power-domains");
+ if (!np)
+ break;
+
+ index = 0;
+
+ while (1) {
+ ret = of_parse_phandle_with_args(np, "power-domains",
+ "#power-domain-cells",
+ index, &args);
+ if (ret)
+ break;
+
+ if (args.args_count >= 1 && args.np == dev->of_node) {
+ if (args.args[0] > max_id)
+ max_id = args.args[0];
+
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+ pd->pd.name = devm_kasprintf(dev, GFP_KERNEL,
+ "pd:%d",
+ args.args[0]);
+ if (!pd->pd.name)
+ return -ENOMEM;
- ti_sci_pd->dev = dev;
+ pd->pd.power_off = ti_sci_pd_power_off;
+ pd->pd.power_on = ti_sci_pd_power_on;
+ pd->idx = args.args[0];
+ pd->parent = pd_provider;
- ti_sci_pd->pd.name = "ti_sci_pd";
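+ /* Register the domain with genpd as initially powered off. */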
+ pm_genpd_init(&pd->pd, NULL, true);
- ti_sci_pd->pd.attach_dev = ti_sci_pd_attach_dev;
- ti_sci_pd->pd.detach_dev = ti_sci_pd_detach_dev;
+ list_add(&pd->node, &pd_provider->pd_list);
+ }
+ index++;
+ }
+ }
- ti_sci_pd->pd.dev_ops.start = ti_sci_dev_start;
- ti_sci_pd->pd.dev_ops.stop = ti_sci_dev_stop;
+ pd_provider->data.domains =
+ devm_kcalloc(dev, max_id + 1,
+ sizeof(*pd_provider->data.domains),
+ GFP_KERNEL);
+ if (!pd_provider->data.domains)
+ return -ENOMEM;
+
- pm_genpd_init(&ti_sci_pd->pd, NULL, true);
+ pd_provider->data.num_domains = max_id + 1;
+ pd_provider->data.xlate = ti_sci_pd_xlate;
- ret = of_genpd_add_provider_simple(np, &ti_sci_pd->pd);
+ list_for_each_entry(pd, &pd_provider->pd_list, node)
+ pd_provider->data.domains[pd->idx] = &pd->pd;
- return ret;
+ return of_genpd_add_provider_onecell(dev->of_node, &pd_provider->data);
}
static struct platform_driver ti_sci_pm_domains_driver = {