Diffstat (limited to 'drivers/remoteproc')
-rw-r--r--  drivers/remoteproc/Kconfig                 |  26
-rw-r--r--  drivers/remoteproc/Makefile                |   1
-rw-r--r--  drivers/remoteproc/da8xx_remoteproc.c      |  41
-rw-r--r--  drivers/remoteproc/imx_dsp_rproc.c         |  30
-rw-r--r--  drivers/remoteproc/imx_rproc.c             | 105
-rw-r--r--  drivers/remoteproc/imx_rproc.h             |   6
-rw-r--r--  drivers/remoteproc/ingenic_rproc.c         |   3
-rw-r--r--  drivers/remoteproc/keystone_remoteproc.c   |  40
-rw-r--r--  drivers/remoteproc/meson_mx_ao_arc.c       |   2
-rw-r--r--  drivers/remoteproc/mtk_common.h            |  11
-rw-r--r--  drivers/remoteproc/mtk_scp.c               | 272
-rw-r--r--  drivers/remoteproc/mtk_scp_ipi.c           |   7
-rw-r--r--  drivers/remoteproc/omap_remoteproc.c       |  71
-rw-r--r--  drivers/remoteproc/pru_rproc.c             |   4
-rw-r--r--  drivers/remoteproc/qcom_common.c           |  87
-rw-r--r--  drivers/remoteproc/qcom_common.h           |  10
-rw-r--r--  drivers/remoteproc/qcom_q6v5_adsp.c        |  33
-rw-r--r--  drivers/remoteproc/qcom_q6v5_mss.c         | 243
-rw-r--r--  drivers/remoteproc/qcom_q6v5_pas.c         | 196
-rw-r--r--  drivers/remoteproc/qcom_q6v5_wcss.c        | 117
-rw-r--r--  drivers/remoteproc/qcom_sysmon.c           |   2
-rw-r--r--  drivers/remoteproc/qcom_wcnss.c            |  36
-rw-r--r--  drivers/remoteproc/qcom_wcnss_iris.c       |   5
-rw-r--r--  drivers/remoteproc/rcar_rproc.c            |   2
-rw-r--r--  drivers/remoteproc/remoteproc_core.c       |  21
-rw-r--r--  drivers/remoteproc/remoteproc_internal.h   |   2
-rw-r--r--  drivers/remoteproc/remoteproc_sysfs.c      |   2
-rw-r--r--  drivers/remoteproc/remoteproc_virtio.c     |  14
-rw-r--r--  drivers/remoteproc/st_remoteproc.c         |  56
-rw-r--r--  drivers/remoteproc/st_slim_rproc.c         |   6
-rw-r--r--  drivers/remoteproc/stm32_rproc.c           |   4
-rw-r--r--  drivers/remoteproc/ti_k3_dsp_remoteproc.c  | 120
-rw-r--r--  drivers/remoteproc/ti_k3_m4_remoteproc.c   | 665
-rw-r--r--  drivers/remoteproc/ti_k3_r5_remoteproc.c   | 346
-rw-r--r--  drivers/remoteproc/ti_sci_proc.h           |  26
-rw-r--r--  drivers/remoteproc/wkup_m3_rproc.c         |   2
-rw-r--r--  drivers/remoteproc/xlnx_r5_remoteproc.c    | 611
37 files changed, 2379 insertions, 846 deletions
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 48845dc8fa85..83962a114dc9 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -166,6 +166,7 @@ config QCOM_PIL_INFO
config QCOM_RPROC_COMMON
tristate
+ select AUXILIARY_BUS
config QCOM_Q6V5_COMMON
tristate
@@ -328,9 +329,9 @@ config STM32_RPROC
config TI_K3_DSP_REMOTEPROC
tristate "TI K3 DSP remoteproc support"
- depends on ARCH_K3
- select MAILBOX
- select OMAP2PLUS_MBOX
+ depends on ARCH_K3 || COMPILE_TEST
+ depends on TI_SCI_PROTOCOL || (COMPILE_TEST && TI_SCI_PROTOCOL=n)
+ depends on OMAP2PLUS_MBOX
help
Say m here to support TI's C66x and C71x DSP remote processor
subsystems on various TI K3 family of SoCs through the remote
@@ -339,11 +340,24 @@ config TI_K3_DSP_REMOTEPROC
It's safe to say N here if you're not interested in utilizing
the DSP slave processors.
+config TI_K3_M4_REMOTEPROC
+ tristate "TI K3 M4 remoteproc support"
+ depends on ARCH_K3 || COMPILE_TEST
+ depends on TI_SCI_PROTOCOL || (COMPILE_TEST && TI_SCI_PROTOCOL=n)
+ depends on OMAP2PLUS_MBOX
+ help
+ Say m here to support TI's M4 remote processor subsystems
+ on various TI K3 family of SoCs through the remote processor
+ framework.
+
+ It's safe to say N here if you're not interested in utilizing
+ a remote processor.
+
config TI_K3_R5_REMOTEPROC
tristate "TI K3 R5 remoteproc support"
- depends on ARCH_K3
- select MAILBOX
- select OMAP2PLUS_MBOX
+ depends on ARCH_K3 || COMPILE_TEST
+ depends on TI_SCI_PROTOCOL || (COMPILE_TEST && TI_SCI_PROTOCOL=n)
+ depends on OMAP2PLUS_MBOX
help
Say m here to support TI's R5F remote processor subsystems
on various TI K3 family of SoCs through the remote processor
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 91314a9b43ce..5ff4e2fee4ab 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -37,5 +37,6 @@ obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o
obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o
obj-$(CONFIG_STM32_RPROC) += stm32_rproc.o
obj-$(CONFIG_TI_K3_DSP_REMOTEPROC) += ti_k3_dsp_remoteproc.o
+obj-$(CONFIG_TI_K3_M4_REMOTEPROC) += ti_k3_m4_remoteproc.o
obj-$(CONFIG_TI_K3_R5_REMOTEPROC) += ti_k3_r5_remoteproc.o
obj-$(CONFIG_XLNX_R5_REMOTEPROC) += xlnx_r5_remoteproc.o
diff --git a/drivers/remoteproc/da8xx_remoteproc.c b/drivers/remoteproc/da8xx_remoteproc.c
index 9041a0e07fb2..93031f0867d1 100644
--- a/drivers/remoteproc/da8xx_remoteproc.c
+++ b/drivers/remoteproc/da8xx_remoteproc.c
@@ -239,8 +239,6 @@ static int da8xx_rproc_probe(struct platform_device *pdev)
struct da8xx_rproc *drproc;
struct rproc *rproc;
struct irq_data *irq_data;
- struct resource *bootreg_res;
- struct resource *chipsig_res;
struct clk *dsp_clk;
struct reset_control *dsp_reset;
void __iomem *chipsig;
@@ -253,46 +251,29 @@ static int da8xx_rproc_probe(struct platform_device *pdev)
return irq;
irq_data = irq_get_irq_data(irq);
- if (!irq_data) {
- dev_err(dev, "irq_get_irq_data(%d): NULL\n", irq);
- return -EINVAL;
- }
+ if (!irq_data)
+ return dev_err_probe(dev, -EINVAL, "irq_get_irq_data(%d): NULL\n", irq);
- bootreg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "host1cfg");
- bootreg = devm_ioremap_resource(dev, bootreg_res);
+ bootreg = devm_platform_ioremap_resource_byname(pdev, "host1cfg");
if (IS_ERR(bootreg))
return PTR_ERR(bootreg);
- chipsig_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "chipsig");
- chipsig = devm_ioremap_resource(dev, chipsig_res);
+ chipsig = devm_platform_ioremap_resource_byname(pdev, "chipsig");
if (IS_ERR(chipsig))
return PTR_ERR(chipsig);
dsp_clk = devm_clk_get(dev, NULL);
- if (IS_ERR(dsp_clk)) {
- dev_err(dev, "clk_get error: %ld\n", PTR_ERR(dsp_clk));
-
- return PTR_ERR(dsp_clk);
- }
+ if (IS_ERR(dsp_clk))
+ return dev_err_probe(dev, PTR_ERR(dsp_clk), "clk_get error\n");
dsp_reset = devm_reset_control_get_exclusive(dev, NULL);
- if (IS_ERR(dsp_reset)) {
- if (PTR_ERR(dsp_reset) != -EPROBE_DEFER)
- dev_err(dev, "unable to get reset control: %ld\n",
- PTR_ERR(dsp_reset));
-
- return PTR_ERR(dsp_reset);
- }
+ if (IS_ERR(dsp_reset))
+ return dev_err_probe(dev, PTR_ERR(dsp_reset), "unable to get reset control\n");
if (dev->of_node) {
ret = of_reserved_mem_device_init(dev);
- if (ret) {
- dev_err(dev, "device does not have specific CMA pool: %d\n",
- ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "device does not have specific CMA pool\n");
}
rproc = rproc_alloc(dev, "dsp", &da8xx_rproc_ops, da8xx_fw_name,
@@ -384,7 +365,7 @@ MODULE_DEVICE_TABLE(of, davinci_rproc_of_match);
static struct platform_driver da8xx_rproc_driver = {
.probe = da8xx_rproc_probe,
- .remove_new = da8xx_rproc_remove,
+ .remove = da8xx_rproc_remove,
.driver = {
.name = "davinci-rproc",
.of_match_table = of_match_ptr(davinci_rproc_of_match),
diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c
index 087506e21508..90cb1fc13e71 100644
--- a/drivers/remoteproc/imx_dsp_rproc.c
+++ b/drivers/remoteproc/imx_dsp_rproc.c
@@ -19,6 +19,7 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include "imx_rproc.h"
@@ -95,6 +96,7 @@ enum imx_dsp_rp_mbox_messages {
/**
* struct imx_dsp_rproc - DSP remote processor state
* @regmap: regmap handler
+ * @run_stall: reset control handle used for Run/Stall operation
* @rproc: rproc handler
* @dsp_dcfg: device configuration pointer
* @clks: clocks needed by this device
@@ -111,6 +113,7 @@ enum imx_dsp_rp_mbox_messages {
*/
struct imx_dsp_rproc {
struct regmap *regmap;
+ struct reset_control *run_stall;
struct rproc *rproc;
const struct imx_dsp_rproc_dcfg *dsp_dcfg;
struct clk_bulk_data clks[DSP_RPROC_CLK_MAX];
@@ -192,9 +195,7 @@ static int imx8mp_dsp_reset(struct imx_dsp_rproc *priv)
/* Keep reset asserted for 10 cycles */
usleep_range(1, 2);
- regmap_update_bits(priv->regmap, IMX8M_AudioDSP_REG2,
- IMX8M_AudioDSP_REG2_RUNSTALL,
- IMX8M_AudioDSP_REG2_RUNSTALL);
+ reset_control_assert(priv->run_stall);
/* Take the DSP out of reset and keep stalled for FW loading */
pwrctl = readl(dap + IMX8M_DAP_PWRCTL);
@@ -231,13 +232,9 @@ static int imx8ulp_dsp_reset(struct imx_dsp_rproc *priv)
/* Specific configuration for i.MX8MP */
static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8mp = {
- .src_reg = IMX8M_AudioDSP_REG2,
- .src_mask = IMX8M_AudioDSP_REG2_RUNSTALL,
- .src_start = 0,
- .src_stop = IMX8M_AudioDSP_REG2_RUNSTALL,
.att = imx_dsp_rproc_att_imx8mp,
.att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8mp),
- .method = IMX_RPROC_MMIO,
+ .method = IMX_RPROC_RESET_CONTROLLER,
};
static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8mp = {
@@ -329,6 +326,9 @@ static int imx_dsp_rproc_start(struct rproc *rproc)
true,
rproc->bootaddr);
break;
+ case IMX_RPROC_RESET_CONTROLLER:
+ ret = reset_control_deassert(priv->run_stall);
+ break;
default:
return -EOPNOTSUPP;
}
@@ -369,6 +369,9 @@ static int imx_dsp_rproc_stop(struct rproc *rproc)
false,
rproc->bootaddr);
break;
+ case IMX_RPROC_RESET_CONTROLLER:
+ ret = reset_control_assert(priv->run_stall);
+ break;
default:
return -EOPNOTSUPP;
}
@@ -509,7 +512,7 @@ static int imx_dsp_rproc_mbox_alloc(struct imx_dsp_rproc *priv)
struct mbox_client *cl;
int ret;
- if (!of_get_property(dev->of_node, "mbox-names", NULL))
+ if (!of_property_present(dev->of_node, "mbox-names"))
return 0;
cl = &priv->cl;
@@ -995,6 +998,13 @@ static int imx_dsp_rproc_detect_mode(struct imx_dsp_rproc *priv)
priv->regmap = regmap;
break;
+ case IMX_RPROC_RESET_CONTROLLER:
+ priv->run_stall = devm_reset_control_get_exclusive(dev, "runstall");
+ if (IS_ERR(priv->run_stall)) {
+ dev_err(dev, "Failed to get DSP runstall reset control\n");
+ return PTR_ERR(priv->run_stall);
+ }
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -1258,7 +1268,7 @@ MODULE_DEVICE_TABLE(of, imx_dsp_rproc_of_match);
static struct platform_driver imx_dsp_rproc_driver = {
.probe = imx_dsp_rproc_probe,
- .remove_new = imx_dsp_rproc_remove,
+ .remove = imx_dsp_rproc_remove,
.driver = {
.name = "imx-dsp-rproc",
.of_match_table = imx_dsp_rproc_of_match,
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 5a3fb902acc9..74299af1d7f1 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -18,6 +18,7 @@
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
+#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
#include <linux/workqueue.h>
@@ -90,7 +91,7 @@ struct imx_rproc_mem {
#define ATT_CORE_MASK 0xffff
#define ATT_CORE(I) BIT((I))
-static int imx_rproc_xtr_mbox_init(struct rproc *rproc);
+static int imx_rproc_xtr_mbox_init(struct rproc *rproc, bool tx_block);
static void imx_rproc_free_mbox(struct rproc *rproc);
struct imx_rproc {
@@ -119,20 +120,16 @@ struct imx_rproc {
static const struct imx_rproc_att imx_rproc_att_imx93[] = {
/* dev addr , sys addr , size , flags */
/* TCM CODE NON-SECURE */
- { 0x0FFC0000, 0x201C0000, 0x00020000, ATT_OWN | ATT_IOMEM },
- { 0x0FFE0000, 0x201E0000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ { 0x0FFC0000, 0x201C0000, 0x00040000, ATT_OWN | ATT_IOMEM },
/* TCM CODE SECURE */
- { 0x1FFC0000, 0x201C0000, 0x00020000, ATT_OWN | ATT_IOMEM },
- { 0x1FFE0000, 0x201E0000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ { 0x1FFC0000, 0x201C0000, 0x00040000, ATT_OWN | ATT_IOMEM },
/* TCM SYS NON-SECURE*/
- { 0x20000000, 0x20200000, 0x00020000, ATT_OWN | ATT_IOMEM },
- { 0x20020000, 0x20220000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ { 0x20000000, 0x20200000, 0x00040000, ATT_OWN | ATT_IOMEM },
/* TCM SYS SECURE*/
- { 0x30000000, 0x20200000, 0x00020000, ATT_OWN | ATT_IOMEM },
- { 0x30020000, 0x20220000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ { 0x30000000, 0x20200000, 0x00040000, ATT_OWN | ATT_IOMEM },
/* DDR */
{ 0x80000000, 0x80000000, 0x10000000, 0 },
@@ -210,11 +207,9 @@ static const struct imx_rproc_att imx_rproc_att_imx8mq[] = {
/* QSPI Code - alias */
{ 0x08000000, 0x08000000, 0x08000000, 0 },
/* DDR (Code) - alias */
- { 0x10000000, 0x80000000, 0x0FFE0000, 0 },
- /* TCML */
- { 0x1FFE0000, 0x007E0000, 0x00020000, ATT_OWN | ATT_IOMEM},
- /* TCMU */
- { 0x20000000, 0x00800000, 0x00020000, ATT_OWN | ATT_IOMEM},
+ { 0x10000000, 0x40000000, 0x0FFE0000, 0 },
+ /* TCML/U */
+ { 0x1FFE0000, 0x007E0000, 0x00040000, ATT_OWN | ATT_IOMEM},
/* OCRAM_S */
{ 0x20180000, 0x00180000, 0x00008000, ATT_OWN },
/* OCRAM */
@@ -339,6 +334,7 @@ static const struct imx_rproc_dcfg imx_rproc_cfg_imx7ulp = {
.att = imx_rproc_att_imx7ulp,
.att_size = ARRAY_SIZE(imx_rproc_att_imx7ulp),
.method = IMX_RPROC_NONE,
+ .flags = IMX_RPROC_NEED_SYSTEM_OFF,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx7d = {
@@ -375,7 +371,7 @@ static int imx_rproc_start(struct rproc *rproc)
struct arm_smccc_res res;
int ret;
- ret = imx_rproc_xtr_mbox_init(rproc);
+ ret = imx_rproc_xtr_mbox_init(rproc, true);
if (ret)
return ret;
@@ -635,7 +631,7 @@ static void imx_rproc_kick(struct rproc *rproc, int vqid)
static int imx_rproc_attach(struct rproc *rproc)
{
- return imx_rproc_xtr_mbox_init(rproc);
+ return imx_rproc_xtr_mbox_init(rproc, true);
}
static int imx_rproc_detach(struct rproc *rproc)
@@ -666,6 +662,17 @@ static struct resource_table *imx_rproc_get_loaded_rsc_table(struct rproc *rproc
return (struct resource_table *)priv->rsc_table;
}
+static struct resource_table *
+imx_rproc_elf_find_loaded_rsc_table(struct rproc *rproc, const struct firmware *fw)
+{
+ struct imx_rproc *priv = rproc->priv;
+
+ if (priv->rsc_table)
+ return (struct resource_table *)priv->rsc_table;
+
+ return rproc_elf_find_loaded_rsc_table(rproc, fw);
+}
+
static const struct rproc_ops imx_rproc_ops = {
.prepare = imx_rproc_prepare,
.attach = imx_rproc_attach,
@@ -676,7 +683,7 @@ static const struct rproc_ops imx_rproc_ops = {
.da_to_va = imx_rproc_da_to_va,
.load = rproc_elf_load_segments,
.parse_fw = imx_rproc_parse_fw,
- .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
+ .find_loaded_rsc_table = imx_rproc_elf_find_loaded_rsc_table,
.get_loaded_rsc_table = imx_rproc_get_loaded_rsc_table,
.sanity_check = rproc_elf_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
@@ -726,31 +733,37 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
struct resource res;
node = of_parse_phandle(np, "memory-region", a);
+ if (!node)
+ continue;
/* Not map vdevbuffer, vdevring region */
if (!strncmp(node->name, "vdev", strlen("vdev"))) {
of_node_put(node);
continue;
}
err = of_address_to_resource(node, 0, &res);
- of_node_put(node);
if (err) {
dev_err(dev, "unable to resolve memory region\n");
+ of_node_put(node);
return err;
}
- if (b >= IMX_RPROC_MEM_MAX)
+ if (b >= IMX_RPROC_MEM_MAX) {
+ of_node_put(node);
break;
+ }
/* Not use resource version, because we might share region */
priv->mem[b].cpu_addr = devm_ioremap_wc(&pdev->dev, res.start, resource_size(&res));
if (!priv->mem[b].cpu_addr) {
dev_err(dev, "failed to remap %pr\n", &res);
+ of_node_put(node);
return -ENOMEM;
}
priv->mem[b].sys_addr = res.start;
priv->mem[b].size = resource_size(&res);
if (!strcmp(node->name, "rsc-table"))
priv->rsc_table = priv->mem[b].cpu_addr;
+ of_node_put(node);
b++;
}
@@ -783,7 +796,7 @@ static void imx_rproc_rx_callback(struct mbox_client *cl, void *msg)
queue_work(priv->workqueue, &priv->rproc_work);
}
-static int imx_rproc_xtr_mbox_init(struct rproc *rproc)
+static int imx_rproc_xtr_mbox_init(struct rproc *rproc, bool tx_block)
{
struct imx_rproc *priv = rproc->priv;
struct device *dev = priv->dev;
@@ -801,12 +814,12 @@ static int imx_rproc_xtr_mbox_init(struct rproc *rproc)
if (priv->tx_ch && priv->rx_ch)
return 0;
- if (!of_get_property(dev->of_node, "mbox-names", NULL))
+ if (!of_property_present(dev->of_node, "mbox-names"))
return 0;
cl = &priv->cl;
cl->dev = dev;
- cl->tx_block = true;
+ cl->tx_block = tx_block;
cl->tx_tout = 100;
cl->knows_txdone = false;
cl->rx_callback = imx_rproc_rx_callback;
@@ -1039,6 +1052,22 @@ static int imx_rproc_clk_enable(struct imx_rproc *priv)
return 0;
}
+static int imx_rproc_sys_off_handler(struct sys_off_data *data)
+{
+ struct rproc *rproc = data->cb_data;
+ int ret;
+
+ imx_rproc_free_mbox(rproc);
+
+ ret = imx_rproc_xtr_mbox_init(rproc, false);
+ if (ret) {
+ dev_err(&rproc->dev, "Failed to request non-blocking mbox\n");
+ return NOTIFY_BAD;
+ }
+
+ return NOTIFY_DONE;
+}
+
static int imx_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1070,7 +1099,9 @@ static int imx_rproc_probe(struct platform_device *pdev)
return -ENOMEM;
}
- ret = imx_rproc_xtr_mbox_init(rproc);
+ INIT_WORK(&priv->rproc_work, imx_rproc_vq_work);
+
+ ret = imx_rproc_xtr_mbox_init(rproc, true);
if (ret)
goto err_put_wkq;
@@ -1088,11 +1119,33 @@ static int imx_rproc_probe(struct platform_device *pdev)
if (ret)
goto err_put_scu;
- INIT_WORK(&priv->rproc_work, imx_rproc_vq_work);
-
if (rproc->state != RPROC_DETACHED)
rproc->auto_boot = of_property_read_bool(np, "fsl,auto-boot");
+ if (dcfg->flags & IMX_RPROC_NEED_SYSTEM_OFF) {
+ /*
+ * setup mailbox to non-blocking mode in
+ * [SYS_OFF_MODE_POWER_OFF_PREPARE, SYS_OFF_MODE_RESTART_PREPARE]
+ * phase before invoking [SYS_OFF_MODE_POWER_OFF, SYS_OFF_MODE_RESTART]
+ * atomic chain, see kernel/reboot.c.
+ */
+ ret = devm_register_sys_off_handler(dev, SYS_OFF_MODE_POWER_OFF_PREPARE,
+ SYS_OFF_PRIO_DEFAULT,
+ imx_rproc_sys_off_handler, rproc);
+ if (ret) {
+ dev_err(dev, "register power off handler failure\n");
+ goto err_put_clk;
+ }
+
+ ret = devm_register_sys_off_handler(dev, SYS_OFF_MODE_RESTART_PREPARE,
+ SYS_OFF_PRIO_DEFAULT,
+ imx_rproc_sys_off_handler, rproc);
+ if (ret) {
+ dev_err(dev, "register restart handler failure\n");
+ goto err_put_clk;
+ }
+ }
+
ret = rproc_add(rproc);
if (ret) {
dev_err(dev, "rproc_add failed\n");
@@ -1145,7 +1198,7 @@ MODULE_DEVICE_TABLE(of, imx_rproc_of_match);
static struct platform_driver imx_rproc_driver = {
.probe = imx_rproc_probe,
- .remove_new = imx_rproc_remove,
+ .remove = imx_rproc_remove,
.driver = {
.name = "imx-rproc",
.of_match_table = imx_rproc_of_match,
diff --git a/drivers/remoteproc/imx_rproc.h b/drivers/remoteproc/imx_rproc.h
index 79a1b8956d14..cfd38d37e146 100644
--- a/drivers/remoteproc/imx_rproc.h
+++ b/drivers/remoteproc/imx_rproc.h
@@ -24,8 +24,13 @@ enum imx_rproc_method {
IMX_RPROC_SMC,
/* Through System Control Unit API */
IMX_RPROC_SCU_API,
+ /* Through Reset Controller API */
+ IMX_RPROC_RESET_CONTROLLER,
};
+/* dcfg flags */
+#define IMX_RPROC_NEED_SYSTEM_OFF BIT(0)
+
struct imx_rproc_dcfg {
u32 src_reg;
u32 src_mask;
@@ -36,6 +41,7 @@ struct imx_rproc_dcfg {
const struct imx_rproc_att *att;
size_t att_size;
enum imx_rproc_method method;
+ u32 flags;
};
#endif /* _IMX_RPROC_H */
diff --git a/drivers/remoteproc/ingenic_rproc.c b/drivers/remoteproc/ingenic_rproc.c
index 9902cce28692..1b78d8ddeacf 100644
--- a/drivers/remoteproc/ingenic_rproc.c
+++ b/drivers/remoteproc/ingenic_rproc.c
@@ -183,8 +183,7 @@ static int ingenic_rproc_probe(struct platform_device *pdev)
vpu->dev = &pdev->dev;
platform_set_drvdata(pdev, vpu);
- mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aux");
- vpu->aux_base = devm_ioremap_resource(dev, mem);
+ vpu->aux_base = devm_platform_ioremap_resource_byname(pdev, "aux");
if (IS_ERR(vpu->aux_base)) {
dev_err(dev, "Failed to ioremap\n");
return PTR_ERR(vpu->aux_base);
diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c
index 7e57b90bcaf8..7b41b4547fa8 100644
--- a/drivers/remoteproc/keystone_remoteproc.c
+++ b/drivers/remoteproc/keystone_remoteproc.c
@@ -335,25 +335,16 @@ static int keystone_rproc_of_get_dev_syscon(struct platform_device *pdev,
{
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
- int ret;
if (!of_property_read_bool(np, "ti,syscon-dev")) {
dev_err(dev, "ti,syscon-dev property is absent\n");
return -EINVAL;
}
- ksproc->dev_ctrl =
- syscon_regmap_lookup_by_phandle(np, "ti,syscon-dev");
- if (IS_ERR(ksproc->dev_ctrl)) {
- ret = PTR_ERR(ksproc->dev_ctrl);
- return ret;
- }
-
- if (of_property_read_u32_index(np, "ti,syscon-dev", 1,
- &ksproc->boot_offset)) {
- dev_err(dev, "couldn't read the boot register offset\n");
- return -EINVAL;
- }
+ ksproc->dev_ctrl = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-dev",
+ 1, &ksproc->boot_offset);
+ if (IS_ERR(ksproc->dev_ctrl))
+ return PTR_ERR(ksproc->dev_ctrl);
return 0;
}
@@ -366,8 +357,6 @@ static int keystone_rproc_probe(struct platform_device *pdev)
struct rproc *rproc;
int dsp_id;
char *fw_name = NULL;
- char *template = "keystone-dsp%d-fw";
- int name_len = 0;
int ret = 0;
if (!np) {
@@ -382,14 +371,12 @@ static int keystone_rproc_probe(struct platform_device *pdev)
}
/* construct a custom default fw name - subject to change in future */
- name_len = strlen(template); /* assuming a single digit alias */
- fw_name = devm_kzalloc(dev, name_len, GFP_KERNEL);
+ fw_name = devm_kasprintf(dev, GFP_KERNEL, "keystone-dsp%d-fw", dsp_id);
if (!fw_name)
return -ENOMEM;
- snprintf(fw_name, name_len, template, dsp_id);
- rproc = rproc_alloc(dev, dev_name(dev), &keystone_rproc_ops, fw_name,
- sizeof(*ksproc));
+ rproc = devm_rproc_alloc(dev, dev_name(dev), &keystone_rproc_ops,
+ fw_name, sizeof(*ksproc));
if (!rproc)
return -ENOMEM;
@@ -400,13 +387,11 @@ static int keystone_rproc_probe(struct platform_device *pdev)
ret = keystone_rproc_of_get_dev_syscon(pdev, ksproc);
if (ret)
- goto free_rproc;
+ return ret;
ksproc->reset = devm_reset_control_get_exclusive(dev, NULL);
- if (IS_ERR(ksproc->reset)) {
- ret = PTR_ERR(ksproc->reset);
- goto free_rproc;
- }
+ if (IS_ERR(ksproc->reset))
+ return PTR_ERR(ksproc->reset);
/* enable clock for accessing DSP internal memories */
pm_runtime_enable(dev);
@@ -471,8 +456,6 @@ disable_clk:
pm_runtime_put_sync(dev);
disable_rpm:
pm_runtime_disable(dev);
-free_rproc:
- rproc_free(rproc);
return ret;
}
@@ -484,7 +467,6 @@ static void keystone_rproc_remove(struct platform_device *pdev)
gpiod_put(ksproc->kick_gpio);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- rproc_free(ksproc->rproc);
of_reserved_mem_device_release(&pdev->dev);
}
@@ -499,7 +481,7 @@ MODULE_DEVICE_TABLE(of, keystone_rproc_of_match);
static struct platform_driver keystone_rproc_driver = {
.probe = keystone_rproc_probe,
- .remove_new = keystone_rproc_remove,
+ .remove = keystone_rproc_remove,
.driver = {
.name = "keystone-rproc",
.of_match_table = keystone_rproc_of_match,
diff --git a/drivers/remoteproc/meson_mx_ao_arc.c b/drivers/remoteproc/meson_mx_ao_arc.c
index f6744b538323..7dfdf11b0036 100644
--- a/drivers/remoteproc/meson_mx_ao_arc.c
+++ b/drivers/remoteproc/meson_mx_ao_arc.c
@@ -246,7 +246,7 @@ MODULE_DEVICE_TABLE(of, meson_mx_ao_arc_rproc_match);
static struct platform_driver meson_mx_ao_arc_rproc_driver = {
.probe = meson_mx_ao_arc_rproc_probe,
- .remove_new = meson_mx_ao_arc_rproc_remove,
+ .remove = meson_mx_ao_arc_rproc_remove,
.driver = {
.name = "meson-mx-ao-arc-rproc",
.of_match_table = meson_mx_ao_arc_rproc_match,
diff --git a/drivers/remoteproc/mtk_common.h b/drivers/remoteproc/mtk_common.h
index 6d7736a031f7..fd5c539ab2ac 100644
--- a/drivers/remoteproc/mtk_common.h
+++ b/drivers/remoteproc/mtk_common.h
@@ -78,7 +78,6 @@
#define MT8195_L2TCM_OFFSET 0x850d0
#define SCP_FW_VER_LEN 32
-#define SCP_SHARE_BUFFER_SIZE 288
struct scp_run {
u32 signaled;
@@ -97,6 +96,11 @@ struct scp_ipi_desc {
struct mtk_scp;
+struct mtk_scp_sizes_data {
+ size_t max_dram_size;
+ size_t ipi_share_buffer_size;
+};
+
struct mtk_scp_of_data {
int (*scp_clk_get)(struct mtk_scp *scp);
int (*scp_before_load)(struct mtk_scp *scp);
@@ -110,6 +114,7 @@ struct mtk_scp_of_data {
u32 host_to_scp_int_bit;
size_t ipi_buf_offset;
+ const struct mtk_scp_sizes_data *scp_sizes;
};
struct mtk_scp_of_cluster {
@@ -141,10 +146,10 @@ struct mtk_scp {
struct scp_ipi_desc ipi_desc[SCP_IPI_MAX];
bool ipi_id_ack[SCP_IPI_MAX];
wait_queue_head_t ack_wq;
+ u8 *share_buf;
void *cpu_addr;
dma_addr_t dma_addr;
- size_t dram_size;
struct rproc_subdev *rpmsg_subdev;
@@ -162,7 +167,7 @@ struct mtk_scp {
struct mtk_share_obj {
u32 id;
u32 len;
- u8 share_buf[SCP_SHARE_BUFFER_SIZE];
+ u8 *share_buf;
};
void scp_memcpy_aligned(void __iomem *dst, const void *src, unsigned int len);
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
index a35409eda0cf..8206a1766481 100644
--- a/drivers/remoteproc/mtk_scp.c
+++ b/drivers/remoteproc/mtk_scp.c
@@ -20,7 +20,6 @@
#include "mtk_common.h"
#include "remoteproc_internal.h"
-#define MAX_CODE_SIZE 0x500000
#define SECTION_NAME_IPI_BUFFER ".ipi_buffer"
/**
@@ -94,14 +93,15 @@ static void scp_ipi_handler(struct mtk_scp *scp)
{
struct mtk_share_obj __iomem *rcv_obj = scp->recv_buf;
struct scp_ipi_desc *ipi_desc = scp->ipi_desc;
- u8 tmp_data[SCP_SHARE_BUFFER_SIZE];
scp_ipi_handler_t handler;
u32 id = readl(&rcv_obj->id);
u32 len = readl(&rcv_obj->len);
+ const struct mtk_scp_sizes_data *scp_sizes;
- if (len > SCP_SHARE_BUFFER_SIZE) {
- dev_err(scp->dev, "ipi message too long (len %d, max %d)", len,
- SCP_SHARE_BUFFER_SIZE);
+ scp_sizes = scp->data->scp_sizes;
+ if (len > scp_sizes->ipi_share_buffer_size) {
+ dev_err(scp->dev, "ipi message too long (len %d, max %zd)", len,
+ scp_sizes->ipi_share_buffer_size);
return;
}
if (id >= SCP_IPI_MAX) {
@@ -117,8 +117,9 @@ static void scp_ipi_handler(struct mtk_scp *scp)
return;
}
- memcpy_fromio(tmp_data, &rcv_obj->share_buf, len);
- handler(tmp_data, len, ipi_desc[id].priv);
+ memcpy_fromio(scp->share_buf, &rcv_obj->share_buf, len);
+ memset(&scp->share_buf[len], 0, scp_sizes->ipi_share_buffer_size - len);
+ handler(scp->share_buf, len, ipi_desc[id].priv);
scp_ipi_unlock(scp, id);
scp->ipi_id_ack[id] = true;
@@ -132,7 +133,9 @@ static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
static int scp_ipi_init(struct mtk_scp *scp, const struct firmware *fw)
{
int ret;
- size_t offset;
+ size_t buf_sz, offset;
+ size_t share_buf_offset;
+ const struct mtk_scp_sizes_data *scp_sizes;
/* read the ipi buf addr from FW itself first */
ret = scp_elf_read_ipi_buf_addr(scp, fw, &offset);
@@ -144,12 +147,23 @@ static int scp_ipi_init(struct mtk_scp *scp, const struct firmware *fw)
}
dev_info(scp->dev, "IPI buf addr %#010zx\n", offset);
+ /* Make sure IPI buffer fits in the L2TCM range assigned to this core */
+ buf_sz = sizeof(*scp->recv_buf) + sizeof(*scp->send_buf);
+
+ if (scp->sram_size < buf_sz + offset) {
+ dev_err(scp->dev, "IPI buffer does not fit in SRAM.\n");
+ return -EOVERFLOW;
+ }
+
+ scp_sizes = scp->data->scp_sizes;
scp->recv_buf = (struct mtk_share_obj __iomem *)
(scp->sram_base + offset);
+ share_buf_offset = sizeof(scp->recv_buf->id)
+ + sizeof(scp->recv_buf->len) + scp_sizes->ipi_share_buffer_size;
scp->send_buf = (struct mtk_share_obj __iomem *)
- (scp->sram_base + offset + sizeof(*scp->recv_buf));
- memset_io(scp->recv_buf, 0, sizeof(*scp->recv_buf));
- memset_io(scp->send_buf, 0, sizeof(*scp->send_buf));
+ (scp->sram_base + offset + share_buf_offset);
+ memset_io(scp->recv_buf, 0, share_buf_offset);
+ memset_io(scp->send_buf, 0, share_buf_offset);
return 0;
}
@@ -463,6 +477,86 @@ static int mt8186_scp_before_load(struct mtk_scp *scp)
return 0;
}
+static int mt8188_scp_l2tcm_on(struct mtk_scp *scp)
+{
+ struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
+
+ mutex_lock(&scp_cluster->cluster_lock);
+
+ if (scp_cluster->l2tcm_refcnt == 0) {
+ /* clear SPM interrupt, SCP2SPM_IPC_CLR */
+ writel(0xff, scp->cluster->reg_base + MT8192_SCP2SPM_IPC_CLR);
+
+ /* Power on L2TCM */
+ scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+ scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+ scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+ scp_sram_power_on(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
+ }
+
+ scp_cluster->l2tcm_refcnt += 1;
+
+ mutex_unlock(&scp_cluster->cluster_lock);
+
+ return 0;
+}
+
+static int mt8188_scp_before_load(struct mtk_scp *scp)
+{
+ writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);
+
+ mt8188_scp_l2tcm_on(scp);
+
+ scp_sram_power_on(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);
+
+ /* enable MPU for all memory regions */
+ writel(0xff, scp->cluster->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
+
+ return 0;
+}
+
+static int mt8188_scp_c1_before_load(struct mtk_scp *scp)
+{
+ u32 sec_ctrl;
+ struct mtk_scp *scp_c0;
+ struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
+
+ scp->data->scp_reset_assert(scp);
+
+ mt8188_scp_l2tcm_on(scp);
+
+ scp_sram_power_on(scp->cluster->reg_base + MT8195_CPU1_SRAM_PD, 0);
+
+ /* enable MPU for all memory regions */
+ writel(0xff, scp->cluster->reg_base + MT8195_CORE1_MEM_ATT_PREDEF);
+
+ /*
+ * The L2TCM_OFFSET_RANGE and L2TCM_OFFSET shift the destination address
+ * on SRAM when SCP core 1 accesses SRAM.
+ *
+ * This configuration solves booting the SCP core 0 and core 1 from
+ * different SRAM address because core 0 and core 1 both boot from
+ * the head of SRAM by default. this must be configured before boot SCP core 1.
+ *
+ * The value of L2TCM_OFFSET_RANGE is from the viewpoint of SCP core 1.
+ * When SCP core 1 issues an address within that range (L2TCM_OFFSET_RANGE),
+ * a fixed offset (L2TCM_OFFSET) is added to the address on the bus.
+ * The shift is transparent to software.
+ */
+ writel(0, scp->cluster->reg_base + MT8195_L2TCM_OFFSET_RANGE_0_LOW);
+ writel(scp->sram_size, scp->cluster->reg_base + MT8195_L2TCM_OFFSET_RANGE_0_HIGH);
+
+ scp_c0 = list_first_entry(&scp_cluster->mtk_scp_list, struct mtk_scp, elem);
+ writel(scp->sram_phys - scp_c0->sram_phys, scp->cluster->reg_base + MT8195_L2TCM_OFFSET);
+
+ /* enable SRAM offset when fetching instruction and data */
+ sec_ctrl = readl(scp->cluster->reg_base + MT8195_SEC_CTRL);
+ sec_ctrl |= MT8195_CORE_OFFSET_ENABLE_I | MT8195_CORE_OFFSET_ENABLE_D;
+ writel(sec_ctrl, scp->cluster->reg_base + MT8195_SEC_CTRL);
+
+ return 0;
+}
+
static int mt8192_scp_before_load(struct mtk_scp *scp)
{
/* clear SPM interrupt, SCP2SPM_IPC_CLR */
@@ -653,14 +747,16 @@ stop:
static void *mt8183_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
{
int offset;
+ const struct mtk_scp_sizes_data *scp_sizes;
+ scp_sizes = scp->data->scp_sizes;
if (da < scp->sram_size) {
offset = da;
if (offset >= 0 && (offset + len) <= scp->sram_size)
return (void __force *)scp->sram_base + offset;
- } else if (scp->dram_size) {
+ } else if (scp_sizes->max_dram_size) {
offset = da - scp->dma_addr;
- if (offset >= 0 && (offset + len) <= scp->dram_size)
+ if (offset >= 0 && (offset + len) <= scp_sizes->max_dram_size)
return scp->cpu_addr + offset;
}
@@ -670,7 +766,9 @@ static void *mt8183_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
static void *mt8192_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
{
int offset;
+ const struct mtk_scp_sizes_data *scp_sizes;
+ scp_sizes = scp->data->scp_sizes;
if (da >= scp->sram_phys &&
(da + len) <= scp->sram_phys + scp->sram_size) {
offset = da - scp->sram_phys;
@@ -686,9 +784,9 @@ static void *mt8192_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
}
/* optional memory region */
- if (scp->dram_size &&
+ if (scp_sizes->max_dram_size &&
da >= scp->dma_addr &&
- (da + len) <= scp->dma_addr + scp->dram_size) {
+ (da + len) <= scp->dma_addr + scp_sizes->max_dram_size) {
offset = da - scp->dma_addr;
return scp->cpu_addr + offset;
}
@@ -709,6 +807,47 @@ static void mt8183_scp_stop(struct mtk_scp *scp)
writel(0, scp->cluster->reg_base + MT8183_WDT_CFG);
}
+static void mt8188_scp_l2tcm_off(struct mtk_scp *scp)
+{
+ struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
+
+ mutex_lock(&scp_cluster->cluster_lock);
+
+ if (scp_cluster->l2tcm_refcnt > 0)
+ scp_cluster->l2tcm_refcnt -= 1;
+
+ if (scp_cluster->l2tcm_refcnt == 0) {
+ /* Power off L2TCM */
+ scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+ scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+ scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+ scp_sram_power_off(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
+ }
+
+ mutex_unlock(&scp_cluster->cluster_lock);
+}
+
+static void mt8188_scp_stop(struct mtk_scp *scp)
+{
+ mt8188_scp_l2tcm_off(scp);
+
+ scp_sram_power_off(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);
+
+ /* Disable SCP watchdog */
+ writel(0, scp->cluster->reg_base + MT8192_CORE0_WDT_CFG);
+}
+
+static void mt8188_scp_c1_stop(struct mtk_scp *scp)
+{
+ mt8188_scp_l2tcm_off(scp);
+
+ /* Power off CPU SRAM */
+ scp_sram_power_off(scp->cluster->reg_base + MT8195_CPU1_SRAM_PD, 0);
+
+ /* Disable SCP watchdog */
+ writel(0, scp->cluster->reg_base + MT8195_CORE1_WDT_CFG);
+}
+
static void mt8192_scp_stop(struct mtk_scp *scp)
{
/* Disable SRAM clock */
@@ -868,6 +1007,7 @@ EXPORT_SYMBOL_GPL(scp_mapping_dm_addr);
static int scp_map_memory_region(struct mtk_scp *scp)
{
int ret;
+ const struct mtk_scp_sizes_data *scp_sizes;
ret = of_reserved_mem_device_init(scp->dev);
@@ -883,8 +1023,8 @@ static int scp_map_memory_region(struct mtk_scp *scp)
}
/* Reserved SCP code size */
- scp->dram_size = MAX_CODE_SIZE;
- scp->cpu_addr = dma_alloc_coherent(scp->dev, scp->dram_size,
+ scp_sizes = scp->data->scp_sizes;
+ scp->cpu_addr = dma_alloc_coherent(scp->dev, scp_sizes->max_dram_size,
&scp->dma_addr, GFP_KERNEL);
if (!scp->cpu_addr)
return -ENOMEM;
@@ -894,10 +1034,13 @@ static int scp_map_memory_region(struct mtk_scp *scp)
static void scp_unmap_memory_region(struct mtk_scp *scp)
{
- if (scp->dram_size == 0)
+ const struct mtk_scp_sizes_data *scp_sizes;
+
+ scp_sizes = scp->data->scp_sizes;
+ if (scp_sizes->max_dram_size == 0)
return;
- dma_free_coherent(scp->dev, scp->dram_size, scp->cpu_addr,
+ dma_free_coherent(scp->dev, scp_sizes->max_dram_size, scp->cpu_addr,
scp->dma_addr);
of_reserved_mem_device_release(scp->dev);
}
@@ -961,6 +1104,7 @@ static struct mtk_scp *scp_rproc_init(struct platform_device *pdev,
struct resource *res;
const char *fw_name = "scp.img";
int ret, i;
+ const struct mtk_scp_sizes_data *scp_sizes;
ret = rproc_of_parse_firmware(dev, 0, &fw_name);
if (ret < 0 && ret != -EINVAL)
@@ -1008,6 +1152,14 @@ static struct mtk_scp *scp_rproc_init(struct platform_device *pdev,
goto release_dev_mem;
}
+ scp_sizes = scp->data->scp_sizes;
+ scp->share_buf = kzalloc(scp_sizes->ipi_share_buffer_size, GFP_KERNEL);
+ if (!scp->share_buf) {
+ dev_err(dev, "Failed to allocate IPI share buffer\n");
+ ret = -ENOMEM;
+ goto release_dev_mem;
+ }
+
init_waitqueue_head(&scp->run.wq);
init_waitqueue_head(&scp->ack_wq);
@@ -1027,6 +1179,8 @@ static struct mtk_scp *scp_rproc_init(struct platform_device *pdev,
remove_subdev:
scp_remove_rpmsg_subdev(scp);
scp_ipi_unregister(scp, SCP_IPI_INIT);
+ kfree(scp->share_buf);
+ scp->share_buf = NULL;
release_dev_mem:
scp_unmap_memory_region(scp);
for (i = 0; i < SCP_IPI_MAX; i++)
@@ -1042,6 +1196,8 @@ static void scp_free(struct mtk_scp *scp)
scp_remove_rpmsg_subdev(scp);
scp_ipi_unregister(scp, SCP_IPI_INIT);
+ kfree(scp->share_buf);
+ scp->share_buf = NULL;
scp_unmap_memory_region(scp);
for (i = 0; i < SCP_IPI_MAX; i++)
mutex_destroy(&scp->ipi_desc[i].lock);
@@ -1170,6 +1326,11 @@ static int scp_cluster_init(struct platform_device *pdev, struct mtk_scp_of_clus
return ret;
}
+static const struct of_device_id scp_core_match[] = {
+ { .compatible = "mediatek,scp-core" },
+ {}
+};
+
static int scp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1188,14 +1349,12 @@ static int scp_probe(struct platform_device *pdev)
/* l1tcm is an optional memory region */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l1tcm");
- scp_cluster->l1tcm_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(scp_cluster->l1tcm_base)) {
- ret = PTR_ERR(scp_cluster->l1tcm_base);
- if (ret != -EINVAL)
- return dev_err_probe(dev, ret, "Failed to map l1tcm memory\n");
+ if (res) {
+ scp_cluster->l1tcm_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(scp_cluster->l1tcm_base))
+ return dev_err_probe(dev, PTR_ERR(scp_cluster->l1tcm_base),
+ "Failed to map l1tcm memory\n");
- scp_cluster->l1tcm_base = NULL;
- } else {
scp_cluster->l1tcm_size = resource_size(res);
scp_cluster->l1tcm_phys = res->start;
}
@@ -1203,13 +1362,15 @@ static int scp_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&scp_cluster->mtk_scp_list);
mutex_init(&scp_cluster->cluster_lock);
- ret = devm_of_platform_populate(dev);
+ ret = of_platform_populate(dev_of_node(dev), scp_core_match, NULL, dev);
if (ret)
return dev_err_probe(dev, ret, "Failed to populate platform devices\n");
ret = scp_cluster_init(pdev, scp_cluster);
- if (ret)
+ if (ret) {
+ of_platform_depopulate(dev);
return ret;
+ }
return 0;
}
@@ -1225,9 +1386,30 @@ static void scp_remove(struct platform_device *pdev)
rproc_del(scp->rproc);
scp_free(scp);
}
+ of_platform_depopulate(&pdev->dev);
mutex_destroy(&scp_cluster->cluster_lock);
}
+static const struct mtk_scp_sizes_data default_scp_sizes = {
+ .max_dram_size = 0x500000,
+ .ipi_share_buffer_size = 288,
+};
+
+static const struct mtk_scp_sizes_data mt8188_scp_sizes = {
+ .max_dram_size = 0x800000,
+ .ipi_share_buffer_size = 600,
+};
+
+static const struct mtk_scp_sizes_data mt8188_scp_c1_sizes = {
+ .max_dram_size = 0xA00000,
+ .ipi_share_buffer_size = 600,
+};
+
+static const struct mtk_scp_sizes_data mt8195_scp_sizes = {
+ .max_dram_size = 0x800000,
+ .ipi_share_buffer_size = 288,
+};
+
static const struct mtk_scp_of_data mt8183_of_data = {
.scp_clk_get = mt8183_scp_clk_get,
.scp_before_load = mt8183_scp_before_load,
@@ -1239,6 +1421,7 @@ static const struct mtk_scp_of_data mt8183_of_data = {
.host_to_scp_reg = MT8183_HOST_TO_SCP,
.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
.ipi_buf_offset = 0x7bdb0,
+ .scp_sizes = &default_scp_sizes,
};
static const struct mtk_scp_of_data mt8186_of_data = {
@@ -1252,18 +1435,33 @@ static const struct mtk_scp_of_data mt8186_of_data = {
.host_to_scp_reg = MT8183_HOST_TO_SCP,
.host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
.ipi_buf_offset = 0x3bdb0,
+ .scp_sizes = &default_scp_sizes,
};
static const struct mtk_scp_of_data mt8188_of_data = {
.scp_clk_get = mt8195_scp_clk_get,
- .scp_before_load = mt8192_scp_before_load,
- .scp_irq_handler = mt8192_scp_irq_handler,
+ .scp_before_load = mt8188_scp_before_load,
+ .scp_irq_handler = mt8195_scp_irq_handler,
.scp_reset_assert = mt8192_scp_reset_assert,
.scp_reset_deassert = mt8192_scp_reset_deassert,
- .scp_stop = mt8192_scp_stop,
+ .scp_stop = mt8188_scp_stop,
.scp_da_to_va = mt8192_scp_da_to_va,
.host_to_scp_reg = MT8192_GIPC_IN_SET,
.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
+ .scp_sizes = &mt8188_scp_sizes,
+};
+
+static const struct mtk_scp_of_data mt8188_of_data_c1 = {
+ .scp_clk_get = mt8195_scp_clk_get,
+ .scp_before_load = mt8188_scp_c1_before_load,
+ .scp_irq_handler = mt8195_scp_c1_irq_handler,
+ .scp_reset_assert = mt8195_scp_c1_reset_assert,
+ .scp_reset_deassert = mt8195_scp_c1_reset_deassert,
+ .scp_stop = mt8188_scp_c1_stop,
+ .scp_da_to_va = mt8192_scp_da_to_va,
+ .host_to_scp_reg = MT8192_GIPC_IN_SET,
+ .host_to_scp_int_bit = MT8195_CORE1_HOST_IPC_INT_BIT,
+ .scp_sizes = &mt8188_scp_c1_sizes,
};
static const struct mtk_scp_of_data mt8192_of_data = {
@@ -1276,6 +1474,7 @@ static const struct mtk_scp_of_data mt8192_of_data = {
.scp_da_to_va = mt8192_scp_da_to_va,
.host_to_scp_reg = MT8192_GIPC_IN_SET,
.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
+ .scp_sizes = &default_scp_sizes,
};
static const struct mtk_scp_of_data mt8195_of_data = {
@@ -1288,6 +1487,7 @@ static const struct mtk_scp_of_data mt8195_of_data = {
.scp_da_to_va = mt8192_scp_da_to_va,
.host_to_scp_reg = MT8192_GIPC_IN_SET,
.host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
+ .scp_sizes = &mt8195_scp_sizes,
};
static const struct mtk_scp_of_data mt8195_of_data_c1 = {
@@ -1300,6 +1500,13 @@ static const struct mtk_scp_of_data mt8195_of_data_c1 = {
.scp_da_to_va = mt8192_scp_da_to_va,
.host_to_scp_reg = MT8192_GIPC_IN_SET,
.host_to_scp_int_bit = MT8195_CORE1_HOST_IPC_INT_BIT,
+ .scp_sizes = &default_scp_sizes,
+};
+
+static const struct mtk_scp_of_data *mt8188_of_data_cores[] = {
+ &mt8188_of_data,
+ &mt8188_of_data_c1,
+ NULL
};
static const struct mtk_scp_of_data *mt8195_of_data_cores[] = {
@@ -1312,6 +1519,7 @@ static const struct of_device_id mtk_scp_of_match[] = {
{ .compatible = "mediatek,mt8183-scp", .data = &mt8183_of_data },
{ .compatible = "mediatek,mt8186-scp", .data = &mt8186_of_data },
{ .compatible = "mediatek,mt8188-scp", .data = &mt8188_of_data },
+ { .compatible = "mediatek,mt8188-scp-dual", .data = &mt8188_of_data_cores },
{ .compatible = "mediatek,mt8192-scp", .data = &mt8192_of_data },
{ .compatible = "mediatek,mt8195-scp", .data = &mt8195_of_data },
{ .compatible = "mediatek,mt8195-scp-dual", .data = &mt8195_of_data_cores },
@@ -1321,7 +1529,7 @@ MODULE_DEVICE_TABLE(of, mtk_scp_of_match);
static struct platform_driver mtk_scp_driver = {
.probe = scp_probe,
- .remove_new = scp_remove,
+ .remove = scp_remove,
.driver = {
.name = "mtk-scp",
.of_match_table = mtk_scp_of_match,
diff --git a/drivers/remoteproc/mtk_scp_ipi.c b/drivers/remoteproc/mtk_scp_ipi.c
index cd0b60106ec2..c068227e251e 100644
--- a/drivers/remoteproc/mtk_scp_ipi.c
+++ b/drivers/remoteproc/mtk_scp_ipi.c
@@ -162,10 +162,13 @@ int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
struct mtk_share_obj __iomem *send_obj = scp->send_buf;
u32 val;
int ret;
+ const struct mtk_scp_sizes_data *scp_sizes;
+
+ scp_sizes = scp->data->scp_sizes;
if (WARN_ON(id <= SCP_IPI_INIT) || WARN_ON(id >= SCP_IPI_MAX) ||
WARN_ON(id == SCP_IPI_NS_SERVICE) ||
- WARN_ON(len > sizeof(send_obj->share_buf)) || WARN_ON(!buf))
+ WARN_ON(len > scp_sizes->ipi_share_buffer_size) || WARN_ON(!buf))
return -EINVAL;
ret = clk_prepare_enable(scp->clk);
@@ -184,7 +187,7 @@ int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
goto unlock_mutex;
}
- scp_memcpy_aligned(send_obj->share_buf, buf, len);
+ scp_memcpy_aligned(&send_obj->share_buf, buf, len);
writel(len, &send_obj->len);
writel(id, &send_obj->id);
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 8f50ab80e56f..9c7182b3b038 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -37,6 +37,10 @@
#include <linux/platform_data/dmtimer-omap.h>
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+#include <asm/dma-iommu.h>
+#endif
+
#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
@@ -720,6 +724,7 @@ out:
* @rproc: remote processor to apply the address translation for
* @da: device address to translate
* @len: length of the memory buffer
+ * @is_iomem: pointer filled in to indicate if @da is iomapped memory
*
* Custom function implementing the rproc .da_to_va ops to provide address
* translation (device address to kernel virtual address) for internal RAMs
@@ -1133,7 +1138,6 @@ static int omap_rproc_get_boot_data(struct platform_device *pdev,
struct device_node *np = pdev->dev.of_node;
struct omap_rproc *oproc = rproc->priv;
const struct omap_rproc_dev_data *data;
- int ret;
data = of_device_get_match_data(&pdev->dev);
if (!data)
@@ -1149,10 +1153,8 @@ static int omap_rproc_get_boot_data(struct platform_device *pdev,
oproc->boot_data->syscon =
syscon_regmap_lookup_by_phandle(np, "ti,bootreg");
- if (IS_ERR(oproc->boot_data->syscon)) {
- ret = PTR_ERR(oproc->boot_data->syscon);
- return ret;
- }
+ if (IS_ERR(oproc->boot_data->syscon))
+ return PTR_ERR(oproc->boot_data->syscon);
if (of_property_read_u32_index(np, "ti,bootreg", 1,
&oproc->boot_data->boot_reg)) {
@@ -1277,6 +1279,13 @@ static int omap_rproc_of_get_timers(struct platform_device *pdev,
return 0;
}
+static void omap_rproc_mem_release(void *data)
+{
+ struct device *dev = data;
+
+ of_reserved_mem_device_release(dev);
+}
+
static int omap_rproc_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -1305,8 +1314,8 @@ static int omap_rproc_probe(struct platform_device *pdev)
return ret;
}
- rproc = rproc_alloc(&pdev->dev, dev_name(&pdev->dev), &omap_rproc_ops,
- firmware, sizeof(*oproc));
+ rproc = devm_rproc_alloc(&pdev->dev, dev_name(&pdev->dev), &omap_rproc_ops,
+ firmware, sizeof(*oproc));
if (!rproc)
return -ENOMEM;
@@ -1316,17 +1325,30 @@ static int omap_rproc_probe(struct platform_device *pdev)
/* All existing OMAP IPU and DSP processors have an MMU */
rproc->has_iommu = true;
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+ /*
+ * Throw away the ARM DMA mapping that we'll never use, so it doesn't
+ * interfere with the core rproc->domain and we get the right DMA ops.
+ */
+ if (pdev->dev.archdata.mapping) {
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(&pdev->dev);
+
+ arm_iommu_detach_device(&pdev->dev);
+ arm_iommu_release_mapping(mapping);
+ }
+#endif
+
ret = omap_rproc_of_get_internal_memories(pdev, rproc);
if (ret)
- goto free_rproc;
+ return ret;
ret = omap_rproc_get_boot_data(pdev, rproc);
if (ret)
- goto free_rproc;
+ return ret;
ret = omap_rproc_of_get_timers(pdev, rproc);
if (ret)
- goto free_rproc;
+ return ret;
init_completion(&oproc->pm_comp);
oproc->autosuspend_delay = DEFAULT_AUTOSUSPEND_DELAY;
@@ -1337,10 +1359,8 @@ static int omap_rproc_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(&pdev->dev, oproc->autosuspend_delay);
oproc->fck = devm_clk_get(&pdev->dev, 0);
- if (IS_ERR(oproc->fck)) {
- ret = PTR_ERR(oproc->fck);
- goto free_rproc;
- }
+ if (IS_ERR(oproc->fck))
+ return PTR_ERR(oproc->fck);
ret = of_reserved_mem_device_init(&pdev->dev);
if (ret) {
@@ -1348,29 +1368,17 @@ static int omap_rproc_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "Typically this should be provided,\n");
dev_warn(&pdev->dev, "only omit if you know what you are doing.\n");
}
+ ret = devm_add_action_or_reset(&pdev->dev, omap_rproc_mem_release, &pdev->dev);
+ if (ret)
+ return ret;
platform_set_drvdata(pdev, rproc);
- ret = rproc_add(rproc);
+ ret = devm_rproc_add(&pdev->dev, rproc);
if (ret)
- goto release_mem;
+ return ret;
return 0;
-
-release_mem:
- of_reserved_mem_device_release(&pdev->dev);
-free_rproc:
- rproc_free(rproc);
- return ret;
-}
-
-static void omap_rproc_remove(struct platform_device *pdev)
-{
- struct rproc *rproc = platform_get_drvdata(pdev);
-
- rproc_del(rproc);
- rproc_free(rproc);
- of_reserved_mem_device_release(&pdev->dev);
}
static const struct dev_pm_ops omap_rproc_pm_ops = {
@@ -1381,7 +1389,6 @@ static const struct dev_pm_ops omap_rproc_pm_ops = {
static struct platform_driver omap_rproc_driver = {
.probe = omap_rproc_probe,
- .remove_new = omap_rproc_remove,
.driver = {
.name = "omap-rproc",
.pm = &omap_rproc_pm_ops,
diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
index 327f0c7ee3d6..4a4eb9c0b133 100644
--- a/drivers/remoteproc/pru_rproc.c
+++ b/drivers/remoteproc/pru_rproc.c
@@ -563,7 +563,7 @@ static int pru_handle_intrmap(struct rproc *rproc)
return -ENODEV;
}
- fwspec.fwnode = of_node_to_fwnode(irq_parent);
+ fwspec.fwnode = of_fwnode_handle(irq_parent);
fwspec.param_count = 3;
for (i = 0; i < pru->evt_count; i++) {
fwspec.param[0] = rsc->pru_intc_map[i].event;
@@ -1132,7 +1132,7 @@ static struct platform_driver pru_rproc_driver = {
.suppress_bind_attrs = true,
},
.probe = pru_rproc_probe,
- .remove_new = pru_rproc_remove,
+ .remove = pru_rproc_remove,
};
module_platform_driver(pru_rproc_driver);
diff --git a/drivers/remoteproc/qcom_common.c b/drivers/remoteproc/qcom_common.c
index 03e5f5d533eb..8c8688f99f0a 100644
--- a/drivers/remoteproc/qcom_common.c
+++ b/drivers/remoteproc/qcom_common.c
@@ -13,6 +13,7 @@
#include <linux/notifier.h>
#include <linux/remoteproc.h>
#include <linux/remoteproc/qcom_rproc.h>
+#include <linux/auxiliary_bus.h>
#include <linux/rpmsg/qcom_glink.h>
#include <linux/rpmsg/qcom_smd.h>
#include <linux/slab.h>
@@ -25,6 +26,7 @@
#define to_glink_subdev(d) container_of(d, struct qcom_rproc_glink, subdev)
#define to_smd_subdev(d) container_of(d, struct qcom_rproc_subdev, subdev)
#define to_ssr_subdev(d) container_of(d, struct qcom_rproc_ssr, subdev)
+#define to_pdm_subdev(d) container_of(d, struct qcom_rproc_pdm, subdev)
#define MAX_NUM_OF_SS 10
#define MAX_REGION_NAME_LENGTH 16
@@ -519,5 +521,90 @@ void qcom_remove_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr)
}
EXPORT_SYMBOL_GPL(qcom_remove_ssr_subdev);
+static void pdm_dev_release(struct device *dev)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+
+ kfree(adev);
+}
+
+static int pdm_notify_prepare(struct rproc_subdev *subdev)
+{
+ struct qcom_rproc_pdm *pdm = to_pdm_subdev(subdev);
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+
+ adev->dev.parent = pdm->dev;
+ adev->dev.release = pdm_dev_release;
+ adev->name = "pd-mapper";
+ adev->id = pdm->index;
+
+ ret = auxiliary_device_init(adev);
+ if (ret) {
+ kfree(adev);
+ return ret;
+ }
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ auxiliary_device_uninit(adev);
+ return ret;
+ }
+
+ pdm->adev = adev;
+
+ return 0;
+}
+
+
+static void pdm_notify_unprepare(struct rproc_subdev *subdev)
+{
+ struct qcom_rproc_pdm *pdm = to_pdm_subdev(subdev);
+
+ if (!pdm->adev)
+ return;
+
+ auxiliary_device_delete(pdm->adev);
+ auxiliary_device_uninit(pdm->adev);
+ pdm->adev = NULL;
+}
+
+/**
+ * qcom_add_pdm_subdev() - register PD Mapper subdevice
+ * @rproc: rproc handle
+ * @pdm: PDM subdevice handle
+ *
+ * Register @pdm so that the Protection Domain mapper service is started when
+ * the DSP is started too.
+ */
+void qcom_add_pdm_subdev(struct rproc *rproc, struct qcom_rproc_pdm *pdm)
+{
+ pdm->dev = &rproc->dev;
+ pdm->index = rproc->index;
+
+ pdm->subdev.prepare = pdm_notify_prepare;
+ pdm->subdev.unprepare = pdm_notify_unprepare;
+
+ rproc_add_subdev(rproc, &pdm->subdev);
+}
+EXPORT_SYMBOL_GPL(qcom_add_pdm_subdev);
+
+/**
+ * qcom_remove_pdm_subdev() - remove PD Mapper subdevice
+ * @rproc: rproc handle
+ * @pdm: PDM subdevice handle
+ *
+ * Remove the PD Mapper subdevice.
+ */
+void qcom_remove_pdm_subdev(struct rproc *rproc, struct qcom_rproc_pdm *pdm)
+{
+ rproc_remove_subdev(rproc, &pdm->subdev);
+}
+EXPORT_SYMBOL_GPL(qcom_remove_pdm_subdev);
+
MODULE_DESCRIPTION("Qualcomm Remoteproc helper driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/qcom_common.h b/drivers/remoteproc/qcom_common.h
index 9ef4449052a9..b07fbaa091a0 100644
--- a/drivers/remoteproc/qcom_common.h
+++ b/drivers/remoteproc/qcom_common.h
@@ -34,6 +34,13 @@ struct qcom_rproc_ssr {
struct qcom_ssr_subsystem *info;
};
+struct qcom_rproc_pdm {
+ struct rproc_subdev subdev;
+ struct device *dev;
+ int index;
+ struct auxiliary_device *adev;
+};
+
void qcom_minidump(struct rproc *rproc, unsigned int minidump_id,
void (*rproc_dumpfn_t)(struct rproc *rproc,
struct rproc_dump_segment *segment, void *dest, size_t offset,
@@ -52,6 +59,9 @@ void qcom_add_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr,
const char *ssr_name);
void qcom_remove_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr);
+void qcom_add_pdm_subdev(struct rproc *rproc, struct qcom_rproc_pdm *pdm);
+void qcom_remove_pdm_subdev(struct rproc *rproc, struct qcom_rproc_pdm *pdm);
+
#if IS_ENABLED(CONFIG_QCOM_SYSMON)
struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
const char *name,
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index 1d24c9b656a8..94af77baa7a1 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -112,6 +112,7 @@ struct qcom_adsp {
struct dev_pm_domain_list *pd_list;
struct qcom_rproc_glink glink_subdev;
+ struct qcom_rproc_pdm pdm_subdev;
struct qcom_rproc_ssr ssr_subdev;
struct qcom_sysmon *sysmon;
@@ -533,15 +534,11 @@ static const struct rproc_ops adsp_ops = {
static int adsp_init_clock(struct qcom_adsp *adsp, const char **clk_ids)
{
int num_clks = 0;
- int i, ret;
+ int i;
adsp->xo = devm_clk_get(adsp->dev, "xo");
- if (IS_ERR(adsp->xo)) {
- ret = PTR_ERR(adsp->xo);
- if (ret != -EPROBE_DEFER)
- dev_err(adsp->dev, "failed to get xo clock");
- return ret;
- }
+ if (IS_ERR(adsp->xo))
+ return dev_err_probe(adsp->dev, PTR_ERR(adsp->xo), "failed to get xo clock");
for (i = 0; clk_ids[i]; i++)
num_clks++;
@@ -707,10 +704,9 @@ static int adsp_probe(struct platform_device *pdev)
return ret;
ret = qcom_rproc_pds_attach(adsp, desc->pd_names, desc->num_pds);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to attach proxy power domains\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to attach proxy power domains\n");
ret = adsp_init_reset(adsp);
if (ret)
@@ -726,21 +722,29 @@ static int adsp_probe(struct platform_device *pdev)
goto disable_pm;
qcom_add_glink_subdev(rproc, &adsp->glink_subdev, desc->ssr_name);
+ qcom_add_pdm_subdev(rproc, &adsp->pdm_subdev);
qcom_add_ssr_subdev(rproc, &adsp->ssr_subdev, desc->ssr_name);
adsp->sysmon = qcom_add_sysmon_subdev(rproc,
desc->sysmon_name,
desc->ssctl_id);
if (IS_ERR(adsp->sysmon)) {
ret = PTR_ERR(adsp->sysmon);
- goto disable_pm;
+ goto deinit_remove_glink_pdm_ssr;
}
ret = rproc_add(rproc);
if (ret)
- goto disable_pm;
+ goto remove_sysmon;
return 0;
+remove_sysmon:
+ qcom_remove_sysmon_subdev(adsp->sysmon);
+deinit_remove_glink_pdm_ssr:
+ qcom_q6v5_deinit(&adsp->q6v5);
+ qcom_remove_glink_subdev(rproc, &adsp->glink_subdev);
+ qcom_remove_pdm_subdev(rproc, &adsp->pdm_subdev);
+ qcom_remove_ssr_subdev(rproc, &adsp->ssr_subdev);
disable_pm:
qcom_rproc_pds_detach(adsp);
@@ -755,6 +759,7 @@ static void adsp_remove(struct platform_device *pdev)
qcom_q6v5_deinit(&adsp->q6v5);
qcom_remove_glink_subdev(adsp->rproc, &adsp->glink_subdev);
+ qcom_remove_pdm_subdev(adsp->rproc, &adsp->pdm_subdev);
qcom_remove_sysmon_subdev(adsp->sysmon);
qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev);
qcom_rproc_pds_detach(adsp);
@@ -837,7 +842,7 @@ MODULE_DEVICE_TABLE(of, adsp_of_match);
static struct platform_driver adsp_pil_driver = {
.probe = adsp_probe,
- .remove_new = adsp_remove,
+ .remove = adsp_remove,
.driver = {
.name = "qcom_q6v5_adsp",
.of_match_table = adsp_of_match,
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index 1779fc890e10..0c0199fb0e68 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -134,6 +134,11 @@
#define BOOT_FSM_TIMEOUT 10000
#define BHS_CHECK_MAX_LOOPS 200
+/* External power block headswitch */
+#define EXTERNAL_BHS_ON BIT(0)
+#define EXTERNAL_BHS_STATUS BIT(4)
+#define EXTERNAL_BHS_TIMEOUT_US 50
+
struct reg_info {
struct regulator *reg;
int uV;
@@ -161,6 +166,7 @@ struct rproc_hexagon_res {
bool has_mba_logs;
bool has_spare_reg;
bool has_qaccept_regs;
+ bool has_ext_bhs_reg;
bool has_ext_cntl_regs;
bool has_vq6;
};
@@ -180,6 +186,7 @@ struct q6v5 {
u32 halt_nc;
u32 halt_vq6;
u32 conn_box;
+ u32 ext_bhs;
u32 qaccept_mdm;
u32 qaccept_cx;
@@ -228,6 +235,7 @@ struct q6v5 {
struct qcom_rproc_glink glink_subdev;
struct qcom_rproc_subdev smd_subdev;
+ struct qcom_rproc_pdm pdm_subdev;
struct qcom_rproc_ssr ssr_subdev;
struct qcom_sysmon *sysmon;
struct platform_device *bam_dmux;
@@ -236,6 +244,7 @@ struct q6v5 {
bool has_mba_logs;
bool has_spare_reg;
bool has_qaccept_regs;
+ bool has_ext_bhs_reg;
bool has_ext_cntl_regs;
bool has_vq6;
u64 mpss_perm;
@@ -245,8 +254,10 @@ struct q6v5 {
};
enum {
+ MSS_MSM8226,
MSS_MSM8909,
MSS_MSM8916,
+ MSS_MSM8926,
MSS_MSM8953,
MSS_MSM8974,
MSS_MSM8996,
@@ -260,7 +271,6 @@ enum {
static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
const struct qcom_mss_reg_res *reg_res)
{
- int rc;
int i;
if (!reg_res)
@@ -268,13 +278,10 @@ static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
for (i = 0; reg_res[i].supply; i++) {
regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
- if (IS_ERR(regs[i].reg)) {
- rc = PTR_ERR(regs[i].reg);
- if (rc != -EPROBE_DEFER)
- dev_err(dev, "Failed to get %s\n regulator",
- reg_res[i].supply);
- return rc;
- }
+ if (IS_ERR(regs[i].reg))
+ return dev_err_probe(dev, PTR_ERR(regs[i].reg),
+ "Failed to get %s\n regulator",
+ reg_res[i].supply);
regs[i].uV = reg_res[i].uV;
regs[i].uA = reg_res[i].uA;
@@ -418,6 +425,34 @@ static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
}
}
+static int q6v5_external_bhs_enable(struct q6v5 *qproc)
+{
+ u32 val;
+ int ret = 0;
+
+ /*
+ * Enable external power block headswitch and wait for it to
+ * stabilize
+ */
+ regmap_set_bits(qproc->conn_map, qproc->ext_bhs, EXTERNAL_BHS_ON);
+
+ ret = regmap_read_poll_timeout(qproc->conn_map, qproc->ext_bhs,
+ val, val & EXTERNAL_BHS_STATUS,
+ 1, EXTERNAL_BHS_TIMEOUT_US);
+
+ if (ret) {
+ dev_err(qproc->dev, "External BHS timed out\n");
+ ret = -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
+static void q6v5_external_bhs_disable(struct q6v5 *qproc)
+{
+ regmap_clear_bits(qproc->conn_map, qproc->ext_bhs, EXTERNAL_BHS_ON);
+}
+
static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, u64 *current_perm,
bool local, bool remote, phys_addr_t addr,
size_t size)
@@ -1115,11 +1150,17 @@ static int q6v5_mba_load(struct q6v5 *qproc)
goto disable_proxy_clk;
}
+ if (qproc->has_ext_bhs_reg) {
+ ret = q6v5_external_bhs_enable(qproc);
+ if (ret < 0)
+ goto disable_vdd;
+ }
+
ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
qproc->reset_clk_count);
if (ret) {
dev_err(qproc->dev, "failed to enable reset clocks\n");
- goto disable_vdd;
+ goto disable_ext_bhs;
}
ret = q6v5_reset_deassert(qproc);
@@ -1161,6 +1202,9 @@ static int q6v5_mba_load(struct q6v5 *qproc)
goto disable_active_clks;
}
+ if (qproc->has_mba_logs)
+ qcom_pil_info_store("mba", qproc->mba_phys, MBA_LOG_SIZE);
+
writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
if (qproc->dp_size) {
writel(qproc->mba_phys + SZ_1M, qproc->rmb_base + RMB_PMI_CODE_START_REG);
@@ -1171,9 +1215,6 @@ static int q6v5_mba_load(struct q6v5 *qproc)
if (ret)
goto reclaim_mba;
- if (qproc->has_mba_logs)
- qcom_pil_info_store("mba", qproc->mba_phys, MBA_LOG_SIZE);
-
ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
if (ret == -ETIMEDOUT) {
dev_err(qproc->dev, "MBA boot timed out\n");
@@ -1217,6 +1258,9 @@ assert_reset:
disable_reset_clks:
q6v5_clk_disable(qproc->dev, qproc->reset_clks,
qproc->reset_clk_count);
+disable_ext_bhs:
+ if (qproc->has_ext_bhs_reg)
+ q6v5_external_bhs_disable(qproc);
disable_vdd:
q6v5_regulator_disable(qproc, qproc->active_regs,
qproc->active_reg_count);
@@ -1284,6 +1328,8 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
qproc->reset_clk_count);
q6v5_clk_disable(qproc->dev, qproc->active_clks,
qproc->active_clk_count);
+ if (qproc->has_ext_bhs_reg)
+ q6v5_external_bhs_disable(qproc);
q6v5_regulator_disable(qproc, qproc->active_regs,
qproc->active_reg_count);
@@ -1753,6 +1799,23 @@ static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
qproc->qaccept_axi = args.args[2];
}
+ if (qproc->has_ext_bhs_reg) {
+ ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
+ "qcom,ext-bhs-reg",
+ 1, 0, &args);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to parse ext-bhs-reg index 0\n");
+ return -EINVAL;
+ }
+
+ qproc->conn_map = syscon_node_to_regmap(args.np);
+ of_node_put(args.np);
+ if (IS_ERR(qproc->conn_map))
+ return PTR_ERR(qproc->conn_map);
+
+ qproc->ext_bhs = args.args[0];
+ }
+
if (qproc->has_ext_cntl_regs) {
ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
"qcom,ext-regs",
@@ -1812,14 +1875,10 @@ static int q6v5_init_clocks(struct device *dev, struct clk **clks,
for (i = 0; clk_names[i]; i++) {
clks[i] = devm_clk_get(dev, clk_names[i]);
- if (IS_ERR(clks[i])) {
- int rc = PTR_ERR(clks[i]);
-
- if (rc != -EPROBE_DEFER)
- dev_err(dev, "Failed to get %s clock\n",
- clk_names[i]);
- return rc;
- }
+ if (IS_ERR(clks[i]))
+ return dev_err_probe(dev, PTR_ERR(clks[i]),
+ "Failed to get %s clock\n",
+ clk_names[i]);
}
return i;
@@ -1838,6 +1897,13 @@ static int q6v5_pds_attach(struct device *dev, struct device **devs,
while (pd_names[num_pds])
num_pds++;
+ /* Handle single power domain */
+ if (num_pds == 1 && dev->pm_domain) {
+ devs[0] = dev;
+ pm_runtime_enable(dev);
+ return 1;
+ }
+
for (i = 0; i < num_pds; i++) {
devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
if (IS_ERR_OR_NULL(devs[i])) {
@@ -1858,8 +1924,15 @@ unroll_attach:
static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
size_t pd_count)
{
+ struct device *dev = qproc->dev;
int i;
+ /* Handle single power domain */
+ if (pd_count == 1 && dev->pm_domain) {
+ pm_runtime_disable(dev);
+ return;
+ }
+
for (i = 0; i < pd_count; i++)
dev_pm_domain_detach(pds[i], false);
}
@@ -2014,6 +2087,7 @@ static int q6v5_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, qproc);
qproc->has_qaccept_regs = desc->has_qaccept_regs;
+ qproc->has_ext_bhs_reg = desc->has_ext_bhs_reg;
qproc->has_ext_cntl_regs = desc->has_ext_cntl_regs;
qproc->has_vq6 = desc->has_vq6;
qproc->has_spare_reg = desc->has_spare_reg;
@@ -2027,42 +2101,32 @@ static int q6v5_probe(struct platform_device *pdev)
ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
desc->proxy_clk_names);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
+ if (ret < 0)
return ret;
- }
qproc->proxy_clk_count = ret;
ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
desc->reset_clk_names);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to get reset clocks.\n");
+ if (ret < 0)
return ret;
- }
qproc->reset_clk_count = ret;
ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
desc->active_clk_names);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to get active clocks.\n");
+ if (ret < 0)
return ret;
- }
qproc->active_clk_count = ret;
ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
desc->proxy_supply);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
+ if (ret < 0)
return ret;
- }
qproc->proxy_reg_count = ret;
ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
desc->active_supply);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to get active regulators.\n");
+ if (ret < 0)
return ret;
- }
qproc->active_reg_count = ret;
ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
@@ -2072,10 +2136,8 @@ static int q6v5_probe(struct platform_device *pdev)
ret = q6v5_regulator_init(&pdev->dev,
qproc->fallback_proxy_regs,
desc->fallback_proxy_supply);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to get fallback proxy regulators.\n");
+ if (ret < 0)
return ret;
- }
qproc->fallback_proxy_reg_count = ret;
} else if (ret < 0) {
dev_err(&pdev->dev, "Failed to init power domains\n");
@@ -2102,6 +2164,7 @@ static int q6v5_probe(struct platform_device *pdev)
qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss");
qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
+ qcom_add_pdm_subdev(rproc, &qproc->pdm_subdev);
qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
if (IS_ERR(qproc->sysmon)) {
@@ -2143,6 +2206,7 @@ static void q6v5_remove(struct platform_device *pdev)
qcom_q6v5_deinit(&qproc->q6v5);
qcom_remove_sysmon_subdev(qproc->sysmon);
qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
+ qcom_remove_pdm_subdev(rproc, &qproc->pdm_subdev);
qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
@@ -2177,6 +2241,7 @@ static const struct rproc_hexagon_res sc7180_mss = {
.has_mba_logs = true,
.has_spare_reg = true,
.has_qaccept_regs = false,
+ .has_ext_bhs_reg = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
.version = MSS_SC7180,
@@ -2205,6 +2270,7 @@ static const struct rproc_hexagon_res sc7280_mss = {
.has_mba_logs = true,
.has_spare_reg = false,
.has_qaccept_regs = true,
+ .has_ext_bhs_reg = false,
.has_ext_cntl_regs = true,
.has_vq6 = true,
.version = MSS_SC7280,
@@ -2236,6 +2302,7 @@ static const struct rproc_hexagon_res sdm660_mss = {
.has_mba_logs = false,
.has_spare_reg = false,
.has_qaccept_regs = false,
+ .has_ext_bhs_reg = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
.version = MSS_SDM660,
@@ -2271,6 +2338,7 @@ static const struct rproc_hexagon_res sdm845_mss = {
.has_mba_logs = false,
.has_spare_reg = false,
.has_qaccept_regs = false,
+ .has_ext_bhs_reg = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
.version = MSS_SDM845,
@@ -2302,6 +2370,7 @@ static const struct rproc_hexagon_res msm8998_mss = {
.has_mba_logs = false,
.has_spare_reg = false,
.has_qaccept_regs = false,
+ .has_ext_bhs_reg = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
.version = MSS_MSM8998,
@@ -2340,6 +2409,7 @@ static const struct rproc_hexagon_res msm8996_mss = {
.has_mba_logs = false,
.has_spare_reg = false,
.has_qaccept_regs = false,
+ .has_ext_bhs_reg = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
.version = MSS_MSM8996,
@@ -2374,6 +2444,7 @@ static const struct rproc_hexagon_res msm8909_mss = {
.has_mba_logs = false,
.has_spare_reg = false,
.has_qaccept_regs = false,
+ .has_ext_bhs_reg = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
.version = MSS_MSM8909,
@@ -2419,6 +2490,7 @@ static const struct rproc_hexagon_res msm8916_mss = {
.has_mba_logs = false,
.has_spare_reg = false,
.has_qaccept_regs = false,
+ .has_ext_bhs_reg = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
.version = MSS_MSM8916,
@@ -2454,6 +2526,7 @@ static const struct rproc_hexagon_res msm8953_mss = {
.has_mba_logs = false,
.has_spare_reg = false,
.has_qaccept_regs = false,
+ .has_ext_bhs_reg = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
.version = MSS_MSM8953,
@@ -2466,17 +2539,101 @@ static const struct rproc_hexagon_res msm8974_mss = {
.supply = "pll",
.uA = 100000,
},
+ {
+ .supply = "mx",
+ .uV = 1050000,
+ },
{}
},
.fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
{
+ .supply = "cx",
+ .uA = 100000,
+ },
+ {}
+ },
+ .active_supply = (struct qcom_mss_reg_res[]) {
+ {
+ .supply = "mss",
+ .uV = 1050000,
+ .uA = 100000,
+ },
+ {}
+ },
+ .proxy_clk_names = (char*[]){
+ "xo",
+ NULL
+ },
+ .active_clk_names = (char*[]){
+ "iface",
+ "bus",
+ "mem",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "cx",
+ NULL
+ },
+ .need_mem_protection = false,
+ .has_alt_reset = false,
+ .has_mba_logs = false,
+ .has_spare_reg = false,
+ .has_qaccept_regs = false,
+ .has_ext_bhs_reg = false,
+ .has_ext_cntl_regs = false,
+ .has_vq6 = false,
+ .version = MSS_MSM8974,
+};
+
+static const struct rproc_hexagon_res msm8226_mss = {
+ .hexagon_mba_image = "mba.b00",
+ .proxy_supply = (struct qcom_mss_reg_res[]) {
+ {
+ .supply = "pll",
+ .uA = 100000,
+ },
+ {
.supply = "mx",
.uV = 1050000,
},
+ {}
+ },
+ .proxy_clk_names = (char*[]){
+ "xo",
+ NULL
+ },
+ .active_clk_names = (char*[]){
+ "iface",
+ "bus",
+ "mem",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "cx",
+ NULL
+ },
+ .need_mem_protection = false,
+ .has_alt_reset = false,
+ .has_mba_logs = false,
+ .has_spare_reg = false,
+ .has_qaccept_regs = false,
+ .has_ext_bhs_reg = true,
+ .has_ext_cntl_regs = false,
+ .has_vq6 = false,
+ .version = MSS_MSM8226,
+};
+
+static const struct rproc_hexagon_res msm8926_mss = {
+ .hexagon_mba_image = "mba.b00",
+ .proxy_supply = (struct qcom_mss_reg_res[]) {
{
- .supply = "cx",
+ .supply = "pll",
.uA = 100000,
},
+ {
+ .supply = "mx",
+ .uV = 1050000,
+ },
{}
},
.active_supply = (struct qcom_mss_reg_res[]) {
@@ -2498,7 +2655,6 @@ static const struct rproc_hexagon_res msm8974_mss = {
NULL
},
.proxy_pd_names = (char*[]){
- "mx",
"cx",
NULL
},
@@ -2507,15 +2663,18 @@ static const struct rproc_hexagon_res msm8974_mss = {
.has_mba_logs = false,
.has_spare_reg = false,
.has_qaccept_regs = false,
+ .has_ext_bhs_reg = false,
.has_ext_cntl_regs = false,
.has_vq6 = false,
- .version = MSS_MSM8974,
+ .version = MSS_MSM8926,
};
static const struct of_device_id q6v5_of_match[] = {
{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
+ { .compatible = "qcom,msm8226-mss-pil", .data = &msm8226_mss},
{ .compatible = "qcom,msm8909-mss-pil", .data = &msm8909_mss},
{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
+ { .compatible = "qcom,msm8926-mss-pil", .data = &msm8926_mss},
{ .compatible = "qcom,msm8953-mss-pil", .data = &msm8953_mss},
{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
@@ -2530,7 +2689,7 @@ MODULE_DEVICE_TABLE(of, q6v5_of_match);
static struct platform_driver q6v5_driver = {
.probe = q6v5_probe,
- .remove_new = q6v5_remove,
+ .remove = q6v5_remove,
.driver = {
.name = "qcom-q6v5-mss",
.of_match_table = q6v5_of_match,
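
The new MSM8226 and MSM8926 entries above depend on q6v5_external_bhs_enable(), which closes an external power block headswitch through a syscon regmap (looked up via the qcom,ext-bhs-reg phandle) and polls its status bit before MBA bring-up. A minimal standalone sketch of that set-then-poll idiom, using hypothetical register and bit names rather than the driver's actual lookup:

	#include <linux/bits.h>
	#include <linux/device.h>
	#include <linux/regmap.h>

	#define DEMO_BHS_ON	BIT(0)	/* request switch on */
	#define DEMO_BHS_READY	BIT(4)	/* hardware reports stable */

	static int demo_bhs_enable(struct device *dev, struct regmap *map,
				   unsigned int offset)
	{
		unsigned int val;
		int ret;

		/* Ask the headswitch to close, then wait on the status bit. */
		regmap_set_bits(map, offset, DEMO_BHS_ON);

		/* Poll roughly every 1 us, give up after 50 us. */
		ret = regmap_read_poll_timeout(map, offset, val,
					       val & DEMO_BHS_READY, 1, 50);
		if (ret)
			dev_err(dev, "headswitch did not stabilize: %d\n", ret);

		return ret;
	}
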
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 54d8005d40a3..b306f223127c 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -52,6 +52,7 @@ struct adsp_data {
const char *ssr_name;
const char *sysmon_name;
int ssctl_id;
+ unsigned int smem_host_id;
int region_assign_idx;
int region_assign_count;
@@ -81,6 +82,7 @@ struct qcom_adsp {
int lite_pas_id;
unsigned int minidump_id;
int crash_reason_smem;
+ unsigned int smem_host_id;
bool decrypt_shutdown;
const char *info_name;
@@ -109,6 +111,7 @@ struct qcom_adsp {
struct qcom_rproc_glink glink_subdev;
struct qcom_rproc_subdev smd_subdev;
+ struct qcom_rproc_pdm pdm_subdev;
struct qcom_rproc_ssr ssr_subdev;
struct qcom_sysmon *sysmon;
@@ -399,6 +402,9 @@ static int adsp_stop(struct rproc *rproc)
if (handover)
qcom_pas_handover(&adsp->q6v5);
+ if (adsp->smem_host_id)
+ ret = qcom_smem_bust_hwspin_lock_by_host(adsp->smem_host_id);
+
return ret;
}
@@ -447,24 +453,16 @@ static const struct rproc_ops adsp_minidump_ops = {
static int adsp_init_clock(struct qcom_adsp *adsp)
{
- int ret;
-
adsp->xo = devm_clk_get(adsp->dev, "xo");
- if (IS_ERR(adsp->xo)) {
- ret = PTR_ERR(adsp->xo);
- if (ret != -EPROBE_DEFER)
- dev_err(adsp->dev, "failed to get xo clock");
- return ret;
- }
+ if (IS_ERR(adsp->xo))
+ return dev_err_probe(adsp->dev, PTR_ERR(adsp->xo),
+ "failed to get xo clock");
+
adsp->aggre2_clk = devm_clk_get_optional(adsp->dev, "aggre2");
- if (IS_ERR(adsp->aggre2_clk)) {
- ret = PTR_ERR(adsp->aggre2_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(adsp->dev,
- "failed to get aggre2 clock");
- return ret;
- }
+ if (IS_ERR(adsp->aggre2_clk))
+ return dev_err_probe(adsp->dev, PTR_ERR(adsp->aggre2_clk),
+ "failed to get aggre2 clock");
return 0;
}
@@ -503,16 +501,16 @@ static int adsp_pds_attach(struct device *dev, struct device **devs,
if (!pd_names)
return 0;
+ while (pd_names[num_pds])
+ num_pds++;
+
/* Handle single power domain */
- if (dev->pm_domain) {
+ if (num_pds == 1 && dev->pm_domain) {
devs[0] = dev;
pm_runtime_enable(dev);
return 1;
}
- while (pd_names[num_pds])
- num_pds++;
-
for (i = 0; i < num_pds; i++) {
devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
if (IS_ERR_OR_NULL(devs[i])) {
@@ -537,7 +535,7 @@ static void adsp_pds_detach(struct qcom_adsp *adsp, struct device **pds,
int i;
/* Handle single power domain */
- if (dev->pm_domain && pd_count) {
+ if (pd_count == 1 && dev->pm_domain) {
pm_runtime_disable(dev);
return;
}
@@ -710,7 +708,7 @@ static int adsp_probe(struct platform_device *pdev)
if (desc->minidump_id)
ops = &adsp_minidump_ops;
- rproc = devm_rproc_alloc(&pdev->dev, pdev->name, ops, fw_name, sizeof(*adsp));
+ rproc = devm_rproc_alloc(&pdev->dev, desc->sysmon_name, ops, fw_name, sizeof(*adsp));
if (!rproc) {
dev_err(&pdev->dev, "unable to allocate remoteproc\n");
@@ -727,6 +725,7 @@ static int adsp_probe(struct platform_device *pdev)
adsp->pas_id = desc->pas_id;
adsp->lite_pas_id = desc->lite_pas_id;
adsp->info_name = desc->sysmon_name;
+ adsp->smem_host_id = desc->smem_host_id;
adsp->decrypt_shutdown = desc->decrypt_shutdown;
adsp->region_assign_idx = desc->region_assign_idx;
adsp->region_assign_count = min_t(int, MAX_ASSIGN_COUNT, desc->region_assign_count);
@@ -752,16 +751,16 @@ static int adsp_probe(struct platform_device *pdev)
ret = adsp_init_clock(adsp);
if (ret)
- goto free_rproc;
+ goto unassign_mem;
ret = adsp_init_regulator(adsp);
if (ret)
- goto free_rproc;
+ goto unassign_mem;
ret = adsp_pds_attach(&pdev->dev, adsp->proxy_pds,
desc->proxy_pd_names);
if (ret < 0)
- goto free_rproc;
+ goto unassign_mem;
adsp->proxy_pd_count = ret;
ret = qcom_q6v5_init(&adsp->q6v5, pdev, rproc, desc->crash_reason_smem, desc->load_state,
@@ -771,23 +770,34 @@ static int adsp_probe(struct platform_device *pdev)
qcom_add_glink_subdev(rproc, &adsp->glink_subdev, desc->ssr_name);
qcom_add_smd_subdev(rproc, &adsp->smd_subdev);
+ qcom_add_pdm_subdev(rproc, &adsp->pdm_subdev);
adsp->sysmon = qcom_add_sysmon_subdev(rproc,
desc->sysmon_name,
desc->ssctl_id);
if (IS_ERR(adsp->sysmon)) {
ret = PTR_ERR(adsp->sysmon);
- goto detach_proxy_pds;
+ goto deinit_remove_pdm_smd_glink;
}
qcom_add_ssr_subdev(rproc, &adsp->ssr_subdev, desc->ssr_name);
ret = rproc_add(rproc);
if (ret)
- goto detach_proxy_pds;
+ goto remove_ssr_sysmon;
return 0;
+remove_ssr_sysmon:
+ qcom_remove_ssr_subdev(rproc, &adsp->ssr_subdev);
+ qcom_remove_sysmon_subdev(adsp->sysmon);
+deinit_remove_pdm_smd_glink:
+ qcom_remove_pdm_subdev(rproc, &adsp->pdm_subdev);
+ qcom_remove_smd_subdev(rproc, &adsp->smd_subdev);
+ qcom_remove_glink_subdev(rproc, &adsp->glink_subdev);
+ qcom_q6v5_deinit(&adsp->q6v5);
detach_proxy_pds:
adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+unassign_mem:
+ adsp_unassign_memory_region(adsp);
free_rproc:
device_init_wakeup(adsp->dev, false);
@@ -805,6 +815,7 @@ static void adsp_remove(struct platform_device *pdev)
qcom_remove_glink_subdev(adsp->rproc, &adsp->glink_subdev);
qcom_remove_sysmon_subdev(adsp->sysmon);
qcom_remove_smd_subdev(adsp->rproc, &adsp->smd_subdev);
+ qcom_remove_pdm_subdev(adsp->rproc, &adsp->pdm_subdev);
qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev);
adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
device_init_wakeup(adsp->dev, false);
@@ -820,6 +831,23 @@ static const struct adsp_data adsp_resource_init = {
.ssctl_id = 0x14,
};
+static const struct adsp_data sa8775p_adsp_resource = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mbn",
+ .pas_id = 1,
+ .minidump_id = 5,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "lcx",
+ "lmx",
+ NULL
+ },
+ .load_state = "adsp",
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
+};
+
static const struct adsp_data sdm845_adsp_resource_init = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
@@ -881,6 +909,7 @@ static const struct adsp_data sm8250_adsp_resource = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
.pas_id = 1,
+ .minidump_id = 5,
.auto_boot = true,
.proxy_pd_names = (char*[]){
"lcx",
@@ -933,6 +962,42 @@ static const struct adsp_data cdsp_resource_init = {
.ssctl_id = 0x17,
};
+static const struct adsp_data sa8775p_cdsp0_resource = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp0.mbn",
+ .pas_id = 18,
+ .minidump_id = 7,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mxc",
+ "nsp",
+ NULL
+ },
+ .load_state = "cdsp",
+ .ssr_name = "cdsp",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+};
+
+static const struct adsp_data sa8775p_cdsp1_resource = {
+ .crash_reason_smem = 633,
+ .firmware_name = "cdsp1.mbn",
+ .pas_id = 30,
+ .minidump_id = 20,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mxc",
+ "nsp",
+ NULL
+ },
+ .load_state = "nsp",
+ .ssr_name = "cdsp1",
+ .sysmon_name = "cdsp1",
+ .ssctl_id = 0x20,
+};
+
static const struct adsp_data sdm845_cdsp_resource_init = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
@@ -1062,6 +1127,7 @@ static const struct adsp_data sm8350_cdsp_resource = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
.pas_id = 18,
+ .minidump_id = 7,
.auto_boot = true,
.proxy_pd_names = (char*[]){
"cx",
@@ -1074,6 +1140,40 @@ static const struct adsp_data sm8350_cdsp_resource = {
.ssctl_id = 0x17,
};
+static const struct adsp_data sa8775p_gpdsp0_resource = {
+ .crash_reason_smem = 640,
+ .firmware_name = "gpdsp0.mbn",
+ .pas_id = 39,
+ .minidump_id = 21,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mxc",
+ NULL
+ },
+ .load_state = "gpdsp0",
+ .ssr_name = "gpdsp0",
+ .sysmon_name = "gpdsp0",
+ .ssctl_id = 0x21,
+};
+
+static const struct adsp_data sa8775p_gpdsp1_resource = {
+ .crash_reason_smem = 641,
+ .firmware_name = "gpdsp1.mbn",
+ .pas_id = 40,
+ .minidump_id = 22,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mxc",
+ NULL
+ },
+ .load_state = "gpdsp1",
+ .ssr_name = "gpdsp1",
+ .sysmon_name = "gpdsp1",
+ .ssctl_id = 0x22,
+};
+
static const struct adsp_data mpss_resource_init = {
.crash_reason_smem = 421,
.firmware_name = "modem.mdt",
@@ -1196,6 +1296,7 @@ static const struct adsp_data sm8550_adsp_resource = {
.ssr_name = "lpass",
.sysmon_name = "adsp",
.ssctl_id = 0x14,
+ .smem_host_id = 2,
};
static const struct adsp_data sm8550_cdsp_resource = {
@@ -1216,6 +1317,7 @@ static const struct adsp_data sm8550_cdsp_resource = {
.ssr_name = "cdsp",
.sysmon_name = "cdsp",
.ssctl_id = 0x17,
+ .smem_host_id = 5,
};
static const struct adsp_data sm8550_mpss_resource = {
@@ -1236,6 +1338,7 @@ static const struct adsp_data sm8550_mpss_resource = {
.ssr_name = "mpss",
.sysmon_name = "modem",
.ssctl_id = 0x12,
+ .smem_host_id = 1,
.region_assign_idx = 2,
.region_assign_count = 1,
.region_assign_vmid = QCOM_SCM_VMID_MSS_MSA,
@@ -1245,7 +1348,8 @@ static const struct adsp_data sc7280_wpss_resource = {
.crash_reason_smem = 626,
.firmware_name = "wpss.mdt",
.pas_id = 6,
- .auto_boot = true,
+ .minidump_id = 4,
+ .auto_boot = false,
.proxy_pd_names = (char*[]){
"cx",
"mx",
@@ -1275,6 +1379,7 @@ static const struct adsp_data sm8650_cdsp_resource = {
.ssr_name = "cdsp",
.sysmon_name = "cdsp",
.ssctl_id = 0x17,
+ .smem_host_id = 5,
.region_assign_idx = 2,
.region_assign_count = 1,
.region_assign_shared = true,
@@ -1299,13 +1404,38 @@ static const struct adsp_data sm8650_mpss_resource = {
.ssr_name = "mpss",
.sysmon_name = "modem",
.ssctl_id = 0x12,
+ .smem_host_id = 1,
.region_assign_idx = 2,
.region_assign_count = 3,
.region_assign_vmid = QCOM_SCM_VMID_MSS_MSA,
};
+static const struct adsp_data sm8750_mpss_resource = {
+ .crash_reason_smem = 421,
+ .firmware_name = "modem.mdt",
+ .dtb_firmware_name = "modem_dtb.mdt",
+ .pas_id = 4,
+ .dtb_pas_id = 0x26,
+ .minidump_id = 3,
+ .auto_boot = false,
+ .decrypt_shutdown = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mss",
+ NULL
+ },
+ .load_state = "modem",
+ .ssr_name = "mpss",
+ .sysmon_name = "modem",
+ .ssctl_id = 0x12,
+ .smem_host_id = 1,
+ .region_assign_idx = 2,
+ .region_assign_count = 2,
+ .region_assign_vmid = QCOM_SCM_VMID_MSS_MSA,
+};
+
static const struct of_device_id adsp_of_match[] = {
- { .compatible = "qcom,msm8226-adsp-pil", .data = &adsp_resource_init},
+ { .compatible = "qcom,msm8226-adsp-pil", .data = &msm8996_adsp_resource},
{ .compatible = "qcom,msm8953-adsp-pil", .data = &msm8996_adsp_resource},
{ .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
{ .compatible = "qcom,msm8996-adsp-pil", .data = &msm8996_adsp_resource},
@@ -1315,6 +1445,12 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,qcs404-adsp-pas", .data = &adsp_resource_init },
{ .compatible = "qcom,qcs404-cdsp-pas", .data = &cdsp_resource_init },
{ .compatible = "qcom,qcs404-wcss-pas", .data = &wcss_resource_init },
+ { .compatible = "qcom,sa8775p-adsp-pas", .data = &sa8775p_adsp_resource},
+ { .compatible = "qcom,sa8775p-cdsp0-pas", .data = &sa8775p_cdsp0_resource},
+ { .compatible = "qcom,sa8775p-cdsp1-pas", .data = &sa8775p_cdsp1_resource},
+ { .compatible = "qcom,sa8775p-gpdsp0-pas", .data = &sa8775p_gpdsp0_resource},
+ { .compatible = "qcom,sa8775p-gpdsp1-pas", .data = &sa8775p_gpdsp1_resource},
+ { .compatible = "qcom,sar2130p-adsp-pas", .data = &sm8350_adsp_resource},
{ .compatible = "qcom,sc7180-adsp-pas", .data = &sm8250_adsp_resource},
{ .compatible = "qcom,sc7180-mpss-pas", .data = &mpss_resource_init},
{ .compatible = "qcom,sc7280-adsp-pas", .data = &sm8350_adsp_resource},
@@ -1332,6 +1468,7 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,sdm845-cdsp-pas", .data = &sdm845_cdsp_resource_init},
{ .compatible = "qcom,sdm845-slpi-pas", .data = &sdm845_slpi_resource_init},
{ .compatible = "qcom,sdx55-mpss-pas", .data = &sdx55_mpss_resource},
+ { .compatible = "qcom,sdx75-mpss-pas", .data = &sm8650_mpss_resource},
{ .compatible = "qcom,sm6115-adsp-pas", .data = &adsp_resource_init},
{ .compatible = "qcom,sm6115-cdsp-pas", .data = &cdsp_resource_init},
{ .compatible = "qcom,sm6115-mpss-pas", .data = &sc8180x_mpss_resource},
@@ -1362,6 +1499,7 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,sm8650-adsp-pas", .data = &sm8550_adsp_resource},
{ .compatible = "qcom,sm8650-cdsp-pas", .data = &sm8650_cdsp_resource},
{ .compatible = "qcom,sm8650-mpss-pas", .data = &sm8650_mpss_resource},
+ { .compatible = "qcom,sm8750-mpss-pas", .data = &sm8750_mpss_resource},
{ .compatible = "qcom,x1e80100-adsp-pas", .data = &x1e80100_adsp_resource},
{ .compatible = "qcom,x1e80100-cdsp-pas", .data = &x1e80100_cdsp_resource},
{ },
@@ -1370,7 +1508,7 @@ MODULE_DEVICE_TABLE(of, adsp_of_match);
static struct platform_driver adsp_driver = {
.probe = adsp_probe,
- .remove_new = adsp_remove,
+ .remove = adsp_remove,
.driver = {
.name = "qcom_q6v5_pas",
.of_match_table = adsp_of_match,
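
Several hunks above (adsp_pds_attach()/adsp_pds_detach() and the q6v5 equivalents) special-case firmware descriptions that list exactly one power domain: genpd has already attached that domain to the device itself, so the driver just enables runtime PM on the device instead of creating virtual domain devices. A rough sketch of that rule, with hypothetical names and assuming a NULL-terminated pd_names array:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/errno.h>
	#include <linux/pm_domain.h>
	#include <linux/pm_runtime.h>

	static int demo_pds_attach(struct device *dev, struct device **devs,
				   const char * const *pd_names)
	{
		int num_pds = 0;
		int i;

		while (pd_names[num_pds])
			num_pds++;

		/* A single domain is already attached to the device by genpd. */
		if (num_pds == 1 && dev->pm_domain) {
			devs[0] = dev;
			pm_runtime_enable(dev);
			return 1;
		}

		for (i = 0; i < num_pds; i++) {
			devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
			if (IS_ERR_OR_NULL(devs[i])) {
				int ret = devs[i] ? PTR_ERR(devs[i]) : -ENODEV;

				while (i--)
					dev_pm_domain_detach(devs[i], false);
				return ret;
			}
		}

		return num_pds;
	}
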
diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c
index 94f68c919ee6..93648734a2f2 100644
--- a/drivers/remoteproc/qcom_q6v5_wcss.c
+++ b/drivers/remoteproc/qcom_q6v5_wcss.c
@@ -148,6 +148,7 @@ struct q6v5_wcss {
bool requires_force_stop;
struct qcom_rproc_glink glink_subdev;
+ struct qcom_rproc_pdm pdm_subdev;
struct qcom_rproc_ssr ssr_subdev;
};
@@ -901,90 +902,58 @@ static int q6v5_alloc_memory_region(struct q6v5_wcss *wcss)
static int q6v5_wcss_init_clock(struct q6v5_wcss *wcss)
{
- int ret;
-
wcss->xo = devm_clk_get(wcss->dev, "xo");
- if (IS_ERR(wcss->xo)) {
- ret = PTR_ERR(wcss->xo);
- if (ret != -EPROBE_DEFER)
- dev_err(wcss->dev, "failed to get xo clock");
- return ret;
- }
+ if (IS_ERR(wcss->xo))
+ return dev_err_probe(wcss->dev, PTR_ERR(wcss->xo),
+ "failed to get xo clock");
wcss->gcc_abhs_cbcr = devm_clk_get(wcss->dev, "gcc_abhs_cbcr");
- if (IS_ERR(wcss->gcc_abhs_cbcr)) {
- ret = PTR_ERR(wcss->gcc_abhs_cbcr);
- if (ret != -EPROBE_DEFER)
- dev_err(wcss->dev, "failed to get gcc abhs clock");
- return ret;
- }
+ if (IS_ERR(wcss->gcc_abhs_cbcr))
+ return dev_err_probe(wcss->dev, PTR_ERR(wcss->gcc_abhs_cbcr),
+ "failed to get gcc abhs clock");
wcss->gcc_axim_cbcr = devm_clk_get(wcss->dev, "gcc_axim_cbcr");
- if (IS_ERR(wcss->gcc_axim_cbcr)) {
- ret = PTR_ERR(wcss->gcc_axim_cbcr);
- if (ret != -EPROBE_DEFER)
- dev_err(wcss->dev, "failed to get gcc axim clock\n");
- return ret;
- }
+ if (IS_ERR(wcss->gcc_axim_cbcr))
+ return dev_err_probe(wcss->dev, PTR_ERR(wcss->gcc_axim_cbcr),
+ "failed to get gcc axim clock\n");
wcss->ahbfabric_cbcr_clk = devm_clk_get(wcss->dev,
"lcc_ahbfabric_cbc");
- if (IS_ERR(wcss->ahbfabric_cbcr_clk)) {
- ret = PTR_ERR(wcss->ahbfabric_cbcr_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(wcss->dev, "failed to get ahbfabric clock\n");
- return ret;
- }
+ if (IS_ERR(wcss->ahbfabric_cbcr_clk))
+ return dev_err_probe(wcss->dev, PTR_ERR(wcss->ahbfabric_cbcr_clk),
+ "failed to get ahbfabric clock\n");
wcss->lcc_csr_cbcr = devm_clk_get(wcss->dev, "tcsr_lcc_cbc");
- if (IS_ERR(wcss->lcc_csr_cbcr)) {
- ret = PTR_ERR(wcss->lcc_csr_cbcr);
- if (ret != -EPROBE_DEFER)
- dev_err(wcss->dev, "failed to get csr cbcr clk\n");
- return ret;
- }
+ if (IS_ERR(wcss->lcc_csr_cbcr))
+ return dev_err_probe(wcss->dev, PTR_ERR(wcss->lcc_csr_cbcr),
+ "failed to get csr cbcr clk\n");
wcss->ahbs_cbcr = devm_clk_get(wcss->dev,
"lcc_abhs_cbc");
- if (IS_ERR(wcss->ahbs_cbcr)) {
- ret = PTR_ERR(wcss->ahbs_cbcr);
- if (ret != -EPROBE_DEFER)
- dev_err(wcss->dev, "failed to get ahbs_cbcr clk\n");
- return ret;
- }
+ if (IS_ERR(wcss->ahbs_cbcr))
+ return dev_err_probe(wcss->dev, PTR_ERR(wcss->ahbs_cbcr),
+ "failed to get ahbs_cbcr clk\n");
wcss->tcm_slave_cbcr = devm_clk_get(wcss->dev,
"lcc_tcm_slave_cbc");
- if (IS_ERR(wcss->tcm_slave_cbcr)) {
- ret = PTR_ERR(wcss->tcm_slave_cbcr);
- if (ret != -EPROBE_DEFER)
- dev_err(wcss->dev, "failed to get tcm cbcr clk\n");
- return ret;
- }
+ if (IS_ERR(wcss->tcm_slave_cbcr))
+ return dev_err_probe(wcss->dev, PTR_ERR(wcss->tcm_slave_cbcr),
+ "failed to get tcm cbcr clk\n");
wcss->qdsp6ss_abhm_cbcr = devm_clk_get(wcss->dev, "lcc_abhm_cbc");
- if (IS_ERR(wcss->qdsp6ss_abhm_cbcr)) {
- ret = PTR_ERR(wcss->qdsp6ss_abhm_cbcr);
- if (ret != -EPROBE_DEFER)
- dev_err(wcss->dev, "failed to get abhm cbcr clk\n");
- return ret;
- }
+ if (IS_ERR(wcss->qdsp6ss_abhm_cbcr))
+ return dev_err_probe(wcss->dev, PTR_ERR(wcss->qdsp6ss_abhm_cbcr),
+ "failed to get abhm cbcr clk\n");
wcss->qdsp6ss_axim_cbcr = devm_clk_get(wcss->dev, "lcc_axim_cbc");
- if (IS_ERR(wcss->qdsp6ss_axim_cbcr)) {
- ret = PTR_ERR(wcss->qdsp6ss_axim_cbcr);
- if (ret != -EPROBE_DEFER)
- dev_err(wcss->dev, "failed to get axim cbcr clk\n");
- return ret;
- }
+ if (IS_ERR(wcss->qdsp6ss_axim_cbcr))
+ return dev_err_probe(wcss->dev, PTR_ERR(wcss->qdsp6ss_axim_cbcr),
+ "failed to get axim cbcr clk\n");
wcss->lcc_bcr_sleep = devm_clk_get(wcss->dev, "lcc_bcr_sleep");
- if (IS_ERR(wcss->lcc_bcr_sleep)) {
- ret = PTR_ERR(wcss->lcc_bcr_sleep);
- if (ret != -EPROBE_DEFER)
- dev_err(wcss->dev, "failed to get bcr cbcr clk\n");
- return ret;
- }
+ if (IS_ERR(wcss->lcc_bcr_sleep))
+ return dev_err_probe(wcss->dev, PTR_ERR(wcss->lcc_bcr_sleep),
+ "failed to get bcr cbcr clk\n");
return 0;
}
@@ -1020,7 +989,6 @@ static int q6v5_wcss_probe(struct platform_device *pdev)
wcss = rproc->priv;
wcss->dev = &pdev->dev;
- wcss->version = desc->version;
wcss->version = desc->version;
wcss->requires_force_stop = desc->requires_force_stop;
@@ -1052,20 +1020,36 @@ static int q6v5_wcss_probe(struct platform_device *pdev)
return ret;
qcom_add_glink_subdev(rproc, &wcss->glink_subdev, "q6wcss");
+ qcom_add_pdm_subdev(rproc, &wcss->pdm_subdev);
qcom_add_ssr_subdev(rproc, &wcss->ssr_subdev, "q6wcss");
- if (desc->ssctl_id)
+ if (desc->ssctl_id) {
wcss->sysmon = qcom_add_sysmon_subdev(rproc,
desc->sysmon_name,
desc->ssctl_id);
+ if (IS_ERR(wcss->sysmon)) {
+ ret = PTR_ERR(wcss->sysmon);
+ goto deinit_remove_subdevs;
+ }
+ }
ret = rproc_add(rproc);
if (ret)
- return ret;
+ goto remove_sysmon_subdev;
platform_set_drvdata(pdev, rproc);
return 0;
+
+remove_sysmon_subdev:
+ if (desc->ssctl_id)
+ qcom_remove_sysmon_subdev(wcss->sysmon);
+deinit_remove_subdevs:
+ qcom_q6v5_deinit(&wcss->q6v5);
+ qcom_remove_glink_subdev(rproc, &wcss->glink_subdev);
+ qcom_remove_pdm_subdev(rproc, &wcss->pdm_subdev);
+ qcom_remove_ssr_subdev(rproc, &wcss->ssr_subdev);
+ return ret;
}
static void q6v5_wcss_remove(struct platform_device *pdev)
@@ -1074,6 +1058,7 @@ static void q6v5_wcss_remove(struct platform_device *pdev)
struct q6v5_wcss *wcss = rproc->priv;
qcom_q6v5_deinit(&wcss->q6v5);
+ qcom_remove_pdm_subdev(rproc, &wcss->pdm_subdev);
rproc_del(rproc);
}
@@ -1108,7 +1093,7 @@ MODULE_DEVICE_TABLE(of, q6v5_wcss_of_match);
static struct platform_driver q6v5_wcss_driver = {
.probe = q6v5_wcss_probe,
- .remove_new = q6v5_wcss_remove,
+ .remove = q6v5_wcss_remove,
.driver = {
.name = "qcom-q6v5-wcss-pil",
.of_match_table = q6v5_wcss_of_match,
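
The WCSS clock-lookup conversion above collapses the repeated "if (ret != -EPROBE_DEFER) dev_err(...)" pattern into dev_err_probe(), which suppresses the message for probe deferral (recording it as a deferral reason instead) and returns the error in a single expression. A small sketch of the idiom with a hypothetical helper, not taken from the patch:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	static int demo_get_clock(struct device *dev, struct clk **out,
				  const char *id)
	{
		struct clk *clk = devm_clk_get(dev, id);

		/* Logs unless the error is -EPROBE_DEFER, then returns it. */
		if (IS_ERR(clk))
			return dev_err_probe(dev, PTR_ERR(clk),
					     "failed to get %s clock\n", id);

		*out = clk;
		return 0;
	}
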
diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c
index c24e4a882873..660ac6fc4082 100644
--- a/drivers/remoteproc/qcom_sysmon.c
+++ b/drivers/remoteproc/qcom_sysmon.c
@@ -619,7 +619,7 @@ static irqreturn_t sysmon_shutdown_interrupt(int irq, void *data)
* @name: name of this subdev, to use in SSR
* @ssctl_instance: instance id of the ssctl QMI service
*
- * Return: A new qcom_sysmon object, or NULL on failure
+ * Return: A new qcom_sysmon object, or an error pointer on failure
*/
struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
const char *name,
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index a7bb9da27029..2c7e519a2254 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -117,10 +117,10 @@ static const struct wcnss_data pronto_v1_data = {
.pmu_offset = 0x1004,
.spare_offset = 0x1088,
- .pd_names = { "mx", "cx" },
+ .pd_names = { "cx", "mx" },
.vregs = (struct wcnss_vreg_info[]) {
- { "vddmx", 950000, 1150000, 0 },
{ "vddcx", .super_turbo = true},
+ { "vddmx", 950000, 1150000, 0 },
{ "vddpx", 1800000, 1800000, 0 },
},
.num_pd_vregs = 2,
@@ -131,10 +131,10 @@ static const struct wcnss_data pronto_v2_data = {
.pmu_offset = 0x1004,
.spare_offset = 0x1088,
- .pd_names = { "mx", "cx" },
+ .pd_names = { "cx", "mx" },
.vregs = (struct wcnss_vreg_info[]) {
- { "vddmx", 1287500, 1287500, 0 },
{ "vddcx", .super_turbo = true },
+ { "vddmx", 1287500, 1287500, 0 },
{ "vddpx", 1800000, 1800000, 0 },
},
.num_pd_vregs = 2,
@@ -397,8 +397,17 @@ static irqreturn_t wcnss_stop_ack_interrupt(int irq, void *dev)
static int wcnss_init_pds(struct qcom_wcnss *wcnss,
const char * const pd_names[WCNSS_MAX_PDS])
{
+ struct device *dev = wcnss->dev;
int i, ret;
+ /* Handle single power domain */
+ if (dev->pm_domain) {
+ wcnss->pds[0] = dev;
+ wcnss->num_pds = 1;
+ pm_runtime_enable(dev);
+ return 0;
+ }
+
for (i = 0; i < WCNSS_MAX_PDS; i++) {
if (!pd_names[i])
break;
@@ -418,8 +427,15 @@ static int wcnss_init_pds(struct qcom_wcnss *wcnss,
static void wcnss_release_pds(struct qcom_wcnss *wcnss)
{
+ struct device *dev = wcnss->dev;
int i;
+ /* Handle single power domain */
+ if (wcnss->num_pds == 1 && dev->pm_domain) {
+ pm_runtime_disable(dev);
+ return;
+ }
+
for (i = 0; i < wcnss->num_pds; i++)
dev_pm_domain_detach(wcnss->pds[i], false);
}
@@ -437,10 +453,14 @@ static int wcnss_init_regulators(struct qcom_wcnss *wcnss,
* the regulators for the power domains. For old device trees we need to
* reserve extra space to manage them through the regulator interface.
*/
- if (wcnss->num_pds)
- info += num_pd_vregs;
- else
+ if (wcnss->num_pds) {
+ info += wcnss->num_pds;
+ /* Handle single power domain case */
+ if (wcnss->num_pds < num_pd_vregs)
+ num_vregs += num_pd_vregs - wcnss->num_pds;
+ } else {
num_vregs += num_pd_vregs;
+ }
bulk = devm_kcalloc(wcnss->dev,
num_vregs, sizeof(struct regulator_bulk_data),
@@ -682,7 +702,7 @@ MODULE_DEVICE_TABLE(of, wcnss_of_match);
static struct platform_driver wcnss_driver = {
.probe = wcnss_probe,
- .remove_new = wcnss_remove,
+ .remove = wcnss_remove,
.driver = {
.name = "qcom-wcnss-pil",
.of_match_table = wcnss_of_match,
diff --git a/drivers/remoteproc/qcom_wcnss_iris.c b/drivers/remoteproc/qcom_wcnss_iris.c
index dd36fd077911..b989718776bd 100644
--- a/drivers/remoteproc/qcom_wcnss_iris.c
+++ b/drivers/remoteproc/qcom_wcnss_iris.c
@@ -155,9 +155,8 @@ struct qcom_iris *qcom_iris_probe(struct device *parent, bool *use_48mhz_xo)
iris->xo_clk = devm_clk_get(&iris->dev, "xo");
if (IS_ERR(iris->xo_clk)) {
- ret = PTR_ERR(iris->xo_clk);
- if (ret != -EPROBE_DEFER)
- dev_err(&iris->dev, "failed to acquire xo clk\n");
+ ret = dev_err_probe(&iris->dev, PTR_ERR(iris->xo_clk),
+ "failed to acquire xo clk\n");
goto err_device_del;
}
diff --git a/drivers/remoteproc/rcar_rproc.c b/drivers/remoteproc/rcar_rproc.c
index cc17e8421f65..921d853594f4 100644
--- a/drivers/remoteproc/rcar_rproc.c
+++ b/drivers/remoteproc/rcar_rproc.c
@@ -214,7 +214,7 @@ MODULE_DEVICE_TABLE(of, rcar_rproc_of_match);
static struct platform_driver rcar_rproc_driver = {
.probe = rcar_rproc_probe,
- .remove_new = rcar_rproc_remove,
+ .remove = rcar_rproc_remove,
.driver = {
.name = "rcar-rproc",
.of_match_table = rcar_rproc_of_match,
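
The .remove_new to .remove renames repeated across these drivers follow the platform bus change that made .remove itself return void; .remove_new was only a transitional alias while drivers were converted. A hedged sketch of the resulting driver shape, with placeholder names:

	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int demo_probe(struct platform_device *pdev)
	{
		return 0;
	}

	/* With the void-returning callback, cleanup problems can only be logged. */
	static void demo_remove(struct platform_device *pdev)
	{
	}

	static struct platform_driver demo_driver = {
		.probe = demo_probe,
		.remove = demo_remove,	/* formerly .remove_new during the transition */
		.driver = {
			.name = "demo-rproc",
		},
	};
	module_platform_driver(demo_driver);

	MODULE_LICENSE("GPL");
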
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index f276956f2c5c..b21eedefff87 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -109,10 +109,10 @@ static int rproc_enable_iommu(struct rproc *rproc)
return 0;
}
- domain = iommu_domain_alloc(dev->bus);
- if (!domain) {
+ domain = iommu_paging_domain_alloc(dev);
+ if (IS_ERR(domain)) {
dev_err(dev, "can't alloc iommu domain\n");
- return -ENOMEM;
+ return PTR_ERR(domain);
}
iommu_set_fault_handler(domain, rproc_iommu_fault, rproc);
@@ -2025,6 +2025,7 @@ int rproc_shutdown(struct rproc *rproc)
kfree(rproc->cached_table);
rproc->cached_table = NULL;
rproc->table_ptr = NULL;
+ rproc->table_sz = 0;
out:
mutex_unlock(&rproc->lock);
return ret;
@@ -2486,6 +2487,13 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
rproc->dev.driver_data = rproc;
idr_init(&rproc->notifyids);
+ /* Assign a unique device index and name */
+ rproc->index = ida_alloc(&rproc_dev_index, GFP_KERNEL);
+ if (rproc->index < 0) {
+ dev_err(dev, "ida_alloc failed: %d\n", rproc->index);
+ goto put_device;
+ }
+
rproc->name = kstrdup_const(name, GFP_KERNEL);
if (!rproc->name)
goto put_device;
@@ -2496,13 +2504,6 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
if (rproc_alloc_ops(rproc, ops))
goto put_device;
- /* Assign a unique device index and name */
- rproc->index = ida_alloc(&rproc_dev_index, GFP_KERNEL);
- if (rproc->index < 0) {
- dev_err(dev, "ida_alloc failed: %d\n", rproc->index);
- goto put_device;
- }
-
dev_set_name(&rproc->dev, "remoteproc%d", rproc->index);
atomic_set(&rproc->power, 0);
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index f62a82d71dfa..0cd09e67ac14 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -72,7 +72,7 @@ void rproc_init_debugfs(void);
void rproc_exit_debugfs(void);
/* from remoteproc_sysfs.c */
-extern struct class rproc_class;
+extern const struct class rproc_class;
int rproc_init_sysfs(void);
void rproc_exit_sysfs(void);
diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c
index 8c7ea8922638..138e752c5e4e 100644
--- a/drivers/remoteproc/remoteproc_sysfs.c
+++ b/drivers/remoteproc/remoteproc_sysfs.c
@@ -254,7 +254,7 @@ static const struct attribute_group *rproc_devgroups[] = {
NULL
};
-struct class rproc_class = {
+const struct class rproc_class = {
.name = "remoteproc",
.dev_groups = rproc_devgroups,
};
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 25b66b113b69..25a655f33ec0 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -182,21 +182,21 @@ static void rproc_virtio_del_vqs(struct virtio_device *vdev)
static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct virtqueue *vqs[],
- vq_callback_t *callbacks[],
- const char * const names[],
- const bool * ctx,
+ struct virtqueue_info vqs_info[],
struct irq_affinity *desc)
{
int i, ret, queue_idx = 0;
for (i = 0; i < nvqs; ++i) {
- if (!names[i]) {
+ struct virtqueue_info *vqi = &vqs_info[i];
+
+ if (!vqi->name) {
vqs[i] = NULL;
continue;
}
- vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
- ctx ? ctx[i] : false);
+ vqs[i] = rp_find_vq(vdev, queue_idx++, vqi->callback,
+ vqi->name, vqi->ctx);
if (IS_ERR(vqs[i])) {
ret = PTR_ERR(vqs[i]);
goto error;
@@ -593,7 +593,7 @@ static void rproc_virtio_remove(struct platform_device *pdev)
/* Platform driver */
static struct platform_driver rproc_virtio_driver = {
.probe = rproc_virtio_probe,
- .remove_new = rproc_virtio_remove,
+ .remove = rproc_virtio_remove,
.driver = {
.name = "rproc-virtio",
},
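
The remoteproc_virtio.c change above tracks the virtio core's find_vqs() signature, which now passes one struct virtqueue_info per queue instead of the parallel callbacks[]/names[]/ctx arrays. Each entry bundles the per-queue parameters, roughly as in this illustrative table (names and handlers are hypothetical):

	#include <linux/virtio_config.h>

	static void demo_rx_done(struct virtqueue *vq) { }
	static void demo_tx_done(struct virtqueue *vq) { }

	/* Two used queues and one skipped slot (NULL name means "unused"). */
	static struct virtqueue_info demo_vqs_info[] = {
		{ .name = "rx", .callback = demo_rx_done },
		{ .name = NULL },
		{ .name = "tx", .callback = demo_tx_done, .ctx = false },
	};
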
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
index 1340be9d0110..e6566a9839dc 100644
--- a/drivers/remoteproc/st_remoteproc.c
+++ b/drivers/remoteproc/st_remoteproc.c
@@ -290,26 +290,23 @@ static int st_rproc_parse_dt(struct platform_device *pdev)
if (ddata->config->sw_reset) {
ddata->sw_reset = devm_reset_control_get_exclusive(dev,
"sw_reset");
- if (IS_ERR(ddata->sw_reset)) {
- dev_err(dev, "Failed to get S/W Reset\n");
- return PTR_ERR(ddata->sw_reset);
- }
+ if (IS_ERR(ddata->sw_reset))
+ return dev_err_probe(dev, PTR_ERR(ddata->sw_reset),
+ "Failed to get S/W Reset\n");
}
if (ddata->config->pwr_reset) {
ddata->pwr_reset = devm_reset_control_get_exclusive(dev,
"pwr_reset");
- if (IS_ERR(ddata->pwr_reset)) {
- dev_err(dev, "Failed to get Power Reset\n");
- return PTR_ERR(ddata->pwr_reset);
- }
+ if (IS_ERR(ddata->pwr_reset))
+ return dev_err_probe(dev, PTR_ERR(ddata->pwr_reset),
+ "Failed to get Power Reset\n");
}
ddata->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(ddata->clk)) {
- dev_err(dev, "Failed to get clock\n");
- return PTR_ERR(ddata->clk);
- }
+ if (IS_ERR(ddata->clk))
+ return dev_err_probe(dev, PTR_ERR(ddata->clk),
+ "Failed to get clock\n");
err = of_property_read_u32(np, "clock-frequency", &ddata->clk_rate);
if (err) {
@@ -317,18 +314,11 @@ static int st_rproc_parse_dt(struct platform_device *pdev)
return err;
}
- ddata->boot_base = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
- if (IS_ERR(ddata->boot_base)) {
- dev_err(dev, "Boot base not found\n");
- return PTR_ERR(ddata->boot_base);
- }
-
- err = of_property_read_u32_index(np, "st,syscfg", 1,
- &ddata->boot_offset);
- if (err) {
- dev_err(dev, "Boot offset not found\n");
- return -EINVAL;
- }
+ ddata->boot_base = syscon_regmap_lookup_by_phandle_args(np, "st,syscfg",
+ 1, &ddata->boot_offset);
+ if (IS_ERR(ddata->boot_base))
+ return dev_err_probe(dev, PTR_ERR(ddata->boot_base),
+ "Boot base not found\n");
err = clk_prepare(ddata->clk);
if (err)
@@ -395,32 +385,32 @@ static int st_rproc_probe(struct platform_device *pdev)
*/
chan = mbox_request_channel_byname(&ddata->mbox_client_vq0, "vq0_rx");
if (IS_ERR(chan)) {
- dev_err(&rproc->dev, "failed to request mbox chan 0\n");
- ret = PTR_ERR(chan);
+ ret = dev_err_probe(&rproc->dev, PTR_ERR(chan),
+ "failed to request mbox chan 0\n");
goto free_clk;
}
ddata->mbox_chan[ST_RPROC_VQ0 * MBOX_MAX + MBOX_RX] = chan;
chan = mbox_request_channel_byname(&ddata->mbox_client_vq0, "vq0_tx");
if (IS_ERR(chan)) {
- dev_err(&rproc->dev, "failed to request mbox chan 0\n");
- ret = PTR_ERR(chan);
+ ret = dev_err_probe(&rproc->dev, PTR_ERR(chan),
+ "failed to request mbox chan 0\n");
goto free_mbox;
}
ddata->mbox_chan[ST_RPROC_VQ0 * MBOX_MAX + MBOX_TX] = chan;
chan = mbox_request_channel_byname(&ddata->mbox_client_vq1, "vq1_rx");
if (IS_ERR(chan)) {
- dev_err(&rproc->dev, "failed to request mbox chan 1\n");
- ret = PTR_ERR(chan);
+ ret = dev_err_probe(&rproc->dev, PTR_ERR(chan),
+ "failed to request mbox chan 1\n");
goto free_mbox;
}
ddata->mbox_chan[ST_RPROC_VQ1 * MBOX_MAX + MBOX_RX] = chan;
chan = mbox_request_channel_byname(&ddata->mbox_client_vq1, "vq1_tx");
if (IS_ERR(chan)) {
- dev_err(&rproc->dev, "failed to request mbox chan 1\n");
- ret = PTR_ERR(chan);
+ ret = dev_err_probe(&rproc->dev, PTR_ERR(chan),
+ "failed to request mbox chan 1\n");
goto free_mbox;
}
ddata->mbox_chan[ST_RPROC_VQ1 * MBOX_MAX + MBOX_TX] = chan;
@@ -457,7 +447,7 @@ static void st_rproc_remove(struct platform_device *pdev)
static struct platform_driver st_rproc_driver = {
.probe = st_rproc_probe,
- .remove_new = st_rproc_remove,
+ .remove = st_rproc_remove,
.driver = {
.name = "st-rproc",
.of_match_table = of_match_ptr(st_rproc_match),
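
st_rproc_parse_dt() above folds the separate regmap lookup and index read into syscon_regmap_lookup_by_phandle_args(), which resolves the syscon phandle and hands back the extra cells of the property in one call. A minimal sketch of the call, assuming a one-cell property in the st,syscfg style:

	#include <linux/err.h>
	#include <linux/mfd/syscon.h>
	#include <linux/of.h>
	#include <linux/regmap.h>

	static int demo_lookup_boot_reg(struct device_node *np, struct regmap **map,
					unsigned int *offset)
	{
		/* One extra cell after the phandle: the register offset. */
		*map = syscon_regmap_lookup_by_phandle_args(np, "st,syscfg",
							    1, offset);
		if (IS_ERR(*map))
			return PTR_ERR(*map);

		return 0;
	}
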
diff --git a/drivers/remoteproc/st_slim_rproc.c b/drivers/remoteproc/st_slim_rproc.c
index d17719384c16..5412beb0a692 100644
--- a/drivers/remoteproc/st_slim_rproc.c
+++ b/drivers/remoteproc/st_slim_rproc.c
@@ -259,16 +259,14 @@ struct st_slim_rproc *st_slim_rproc_alloc(struct platform_device *pdev,
slim_rproc->mem[i].size = resource_size(res);
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "slimcore");
- slim_rproc->slimcore = devm_ioremap_resource(dev, res);
+ slim_rproc->slimcore = devm_platform_ioremap_resource_byname(pdev, "slimcore");
if (IS_ERR(slim_rproc->slimcore)) {
dev_err(&pdev->dev, "failed to ioremap slimcore IO\n");
err = PTR_ERR(slim_rproc->slimcore);
goto err;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "peripherals");
- slim_rproc->peri = devm_ioremap_resource(dev, res);
+ slim_rproc->peri = devm_platform_ioremap_resource_byname(pdev, "peripherals");
if (IS_ERR(slim_rproc->peri)) {
dev_err(&pdev->dev, "failed to ioremap peripherals IO\n");
err = PTR_ERR(slim_rproc->peri);
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index 88623df7d0c3..b02b36a3f515 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -294,7 +294,7 @@ static void stm32_rproc_mb_vq_work(struct work_struct *work)
mutex_lock(&rproc->lock);
- if (rproc->state != RPROC_RUNNING)
+ if (rproc->state != RPROC_RUNNING && rproc->state != RPROC_ATTACHED)
goto unlock_mutex;
if (rproc_vq_interrupt(rproc, mb->vq_id) == IRQ_NONE)
@@ -946,7 +946,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(stm32_rproc_pm_ops,
static struct platform_driver stm32_rproc_driver = {
.probe = stm32_rproc_probe,
- .remove_new = stm32_rproc_remove,
+ .remove = stm32_rproc_remove,
.driver = {
.name = "stm32-rproc",
.pm = pm_ptr(&stm32_rproc_pm_ops),
diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
index 3555b535b168..a695890254ff 100644
--- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
@@ -115,6 +115,10 @@ static void k3_dsp_rproc_mbox_callback(struct mbox_client *client, void *data)
const char *name = kproc->rproc->name;
u32 msg = omap_mbox_message(data);
+ /* Do not forward messages from a detached core */
+ if (kproc->rproc->state == RPROC_DETACHED)
+ return;
+
dev_dbg(dev, "mbox msg: 0x%x\n", msg);
switch (msg) {
@@ -155,6 +159,10 @@ static void k3_dsp_rproc_kick(struct rproc *rproc, int vqid)
mbox_msg_t msg = (mbox_msg_t)vqid;
int ret;
+ /* Do not forward messages to a detached core */
+ if (kproc->rproc->state == RPROC_DETACHED)
+ return;
+
/* send the index of the triggered virtqueue in the mailbox payload */
ret = mbox_send_message(kproc->mbox, (void *)msg);
if (ret < 0)
@@ -230,12 +238,9 @@ static int k3_dsp_rproc_request_mbox(struct rproc *rproc)
client->knows_txdone = false;
kproc->mbox = mbox_request_channel(client, 0);
- if (IS_ERR(kproc->mbox)) {
- ret = -EBUSY;
- dev_err(dev, "mbox_request_channel failed: %ld\n",
- PTR_ERR(kproc->mbox));
- return ret;
- }
+ if (IS_ERR(kproc->mbox))
+ return dev_err_probe(dev, PTR_ERR(kproc->mbox),
+ "mbox_request_channel failed\n");
/*
* Ping the remote processor, this is only for sanity-sake for now;
@@ -315,32 +320,23 @@ static int k3_dsp_rproc_start(struct rproc *rproc)
u32 boot_addr;
int ret;
- ret = k3_dsp_rproc_request_mbox(rproc);
- if (ret)
- return ret;
-
boot_addr = rproc->bootaddr;
if (boot_addr & (kproc->data->boot_align_addr - 1)) {
dev_err(dev, "invalid boot address 0x%x, must be aligned on a 0x%x boundary\n",
boot_addr, kproc->data->boot_align_addr);
- ret = -EINVAL;
- goto put_mbox;
+ return -EINVAL;
}
- dev_err(dev, "booting DSP core using boot addr = 0x%x\n", boot_addr);
+ dev_dbg(dev, "booting DSP core using boot addr = 0x%x\n", boot_addr);
ret = ti_sci_proc_set_config(kproc->tsp, boot_addr, 0, 0);
if (ret)
- goto put_mbox;
+ return ret;
ret = k3_dsp_rproc_release(kproc);
if (ret)
- goto put_mbox;
+ return ret;
return 0;
-
-put_mbox:
- mbox_free_channel(kproc->mbox);
- return ret;
}
/*
@@ -353,8 +349,6 @@ static int k3_dsp_rproc_stop(struct rproc *rproc)
{
struct k3_dsp_rproc *kproc = rproc->priv;
- mbox_free_channel(kproc->mbox);
-
k3_dsp_rproc_reset(kproc);
return 0;
@@ -363,42 +357,22 @@ static int k3_dsp_rproc_stop(struct rproc *rproc)
/*
* Attach to a running DSP remote processor (IPC-only mode)
*
- * This rproc attach callback only needs to request the mailbox, the remote
- * processor is already booted, so there is no need to issue any TI-SCI
- * commands to boot the DSP core. This callback is invoked only in IPC-only
- * mode.
+ * This rproc attach callback is a NOP. The remote processor is already booted,
+ * and all required resources have been acquired during probe routine, so there
+ * is no need to issue any TI-SCI commands to boot the DSP core. This callback
+ * is invoked only in IPC-only mode and exists because rproc_validate() checks
+ * for its existence.
*/
-static int k3_dsp_rproc_attach(struct rproc *rproc)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- int ret;
-
- ret = k3_dsp_rproc_request_mbox(rproc);
- if (ret)
- return ret;
-
- dev_info(dev, "DSP initialized in IPC-only mode\n");
- return 0;
-}
+static int k3_dsp_rproc_attach(struct rproc *rproc) { return 0; }
/*
* Detach from a running DSP remote processor (IPC-only mode)
*
- * This rproc detach callback performs the opposite operation to attach callback
- * and only needs to release the mailbox, the DSP core is not stopped and will
- * be left to continue to run its booted firmware. This callback is invoked only
- * in IPC-only mode.
+ * This rproc detach callback is a NOP. The DSP core is not stopped and will be
+ * left to continue to run its booted firmware. This callback is invoked only in
+ * IPC-only mode and exists for sanity sake.
*/
-static int k3_dsp_rproc_detach(struct rproc *rproc)
-{
- struct k3_dsp_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
-
- mbox_free_channel(kproc->mbox);
- dev_info(dev, "DSP deinitialized in IPC-only mode\n");
- return 0;
-}
+static int k3_dsp_rproc_detach(struct rproc *rproc) { return 0; }
/*
* This function implements the .get_loaded_rsc_table() callback and is used
@@ -429,7 +403,7 @@ static struct resource_table *k3_dsp_get_loaded_rsc_table(struct rproc *rproc,
* the hard-coded value suffices to support the IPC-only mode.
*/
*rsc_table_sz = 256;
- return (struct resource_table *)kproc->rmem[0].cpu_addr;
+ return (__force struct resource_table *)kproc->rmem[0].cpu_addr;
}
/*
@@ -602,11 +576,9 @@ static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
return -EINVAL;
rmem = of_reserved_mem_lookup(rmem_np);
- if (!rmem) {
- of_node_put(rmem_np);
- return -EINVAL;
- }
of_node_put(rmem_np);
+ if (!rmem)
+ return -EINVAL;
kproc->rmem[i].bus_addr = rmem->base;
/* 64-bit address regions currently not supported */
@@ -636,32 +608,6 @@ static void k3_dsp_release_tsp(void *data)
ti_sci_proc_release(tsp);
}
-static
-struct ti_sci_proc *k3_dsp_rproc_of_get_tsp(struct device *dev,
- const struct ti_sci_handle *sci)
-{
- struct ti_sci_proc *tsp;
- u32 temp[2];
- int ret;
-
- ret = of_property_read_u32_array(dev->of_node, "ti,sci-proc-ids",
- temp, 2);
- if (ret < 0)
- return ERR_PTR(ret);
-
- tsp = devm_kzalloc(dev, sizeof(*tsp), GFP_KERNEL);
- if (!tsp)
- return ERR_PTR(-ENOMEM);
-
- tsp->dev = dev;
- tsp->sci = sci;
- tsp->ops = &sci->ops.proc_ops;
- tsp->proc_id = temp[0];
- tsp->host_id = temp[1];
-
- return tsp;
-}
-
static int k3_dsp_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -697,6 +643,10 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
kproc->dev = dev;
kproc->data = data;
+ ret = k3_dsp_rproc_request_mbox(rproc);
+ if (ret)
+ return ret;
+
kproc->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
if (IS_ERR(kproc->ti_sci))
return dev_err_probe(dev, PTR_ERR(kproc->ti_sci),
@@ -711,7 +661,7 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(kproc->reset),
"failed to get reset\n");
- kproc->tsp = k3_dsp_rproc_of_get_tsp(dev, kproc->ti_sci);
+ kproc->tsp = ti_sci_proc_of_get_tsp(dev, kproc->ti_sci);
if (IS_ERR(kproc->tsp))
return dev_err_probe(dev, PTR_ERR(kproc->tsp),
"failed to construct ti-sci proc control\n");
@@ -789,6 +739,8 @@ static void k3_dsp_rproc_remove(struct platform_device *pdev)
if (ret)
dev_err(dev, "failed to detach proc (%pe)\n", ERR_PTR(ret));
}
+
+ mbox_free_channel(kproc->mbox);
}
static const struct k3_dsp_mem_data c66_mems[] = {
@@ -839,7 +791,7 @@ MODULE_DEVICE_TABLE(of, k3_dsp_of_match);
static struct platform_driver k3_dsp_rproc_driver = {
.probe = k3_dsp_rproc_probe,
- .remove_new = k3_dsp_rproc_remove,
+ .remove = k3_dsp_rproc_remove,
.driver = {
.name = "k3-dsp-rproc",
.of_match_table = k3_dsp_of_match,
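
The ti_k3_dsp_remoteproc.c rework above moves mbox_request_channel() from the start/attach callbacks into probe and frees the channel only in remove, so the IPC-only attach/detach callbacks become no-ops and traffic for a detached core is filtered by the RPROC_DETACHED checks instead. A compressed sketch of that lifetime, with hypothetical names:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/mailbox_client.h>

	/* Hypothetical private data: only the pieces relevant to the mailbox. */
	struct demo_kproc {
		struct mbox_client client;
		struct mbox_chan *mbox;
	};

	static int demo_request_mbox(struct device *dev, struct demo_kproc *kproc)
	{
		kproc->client.dev = dev;
		kproc->client.tx_block = false;
		kproc->client.knows_txdone = false;
		kproc->client.rx_callback = NULL;	/* real driver installs one */

		/* Channel now lives for the whole device lifetime, not per boot. */
		kproc->mbox = mbox_request_channel(&kproc->client, 0);
		if (IS_ERR(kproc->mbox))
			return dev_err_probe(dev, PTR_ERR(kproc->mbox),
					     "mbox_request_channel failed\n");
		return 0;
	}

	static void demo_free_mbox(struct demo_kproc *kproc)
	{
		mbox_free_channel(kproc->mbox);	/* balanced only at remove time */
	}
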
diff --git a/drivers/remoteproc/ti_k3_m4_remoteproc.c b/drivers/remoteproc/ti_k3_m4_remoteproc.c
new file mode 100644
index 000000000000..a16fb165fced
--- /dev/null
+++ b/drivers/remoteproc/ti_k3_m4_remoteproc.c
@@ -0,0 +1,665 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI K3 Cortex-M4 Remote Processor(s) driver
+ *
+ * Copyright (C) 2021-2024 Texas Instruments Incorporated - https://www.ti.com/
+ * Hari Nagalla <hnagalla@ti.com>
+ */
+
+#include <linux/io.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "omap_remoteproc.h"
+#include "remoteproc_internal.h"
+#include "ti_sci_proc.h"
+
+#define K3_M4_IRAM_DEV_ADDR 0x00000
+#define K3_M4_DRAM_DEV_ADDR 0x30000
+
+/**
+ * struct k3_m4_rproc_mem - internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: Bus address used to access the memory region
+ * @dev_addr: Device address of the memory region from remote processor view
+ * @size: Size of the memory region
+ */
+struct k3_m4_rproc_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ u32 dev_addr;
+ size_t size;
+};
+
+/**
+ * struct k3_m4_rproc_mem_data - memory definitions for a remote processor
+ * @name: name for this memory entry
+ * @dev_addr: device address for the memory entry
+ */
+struct k3_m4_rproc_mem_data {
+ const char *name;
+ const u32 dev_addr;
+};
+
+/**
+ * struct k3_m4_rproc - k3 remote processor driver structure
+ * @dev: cached device pointer
+ * @mem: internal memory regions data
+ * @num_mems: number of internal memory regions
+ * @rmem: reserved memory regions data
+ * @num_rmems: number of reserved memory regions
+ * @reset: reset control handle
+ * @tsp: TI-SCI processor control handle
+ * @ti_sci: TI-SCI handle
+ * @ti_sci_id: TI-SCI device identifier
+ * @mbox: mailbox channel handle
+ * @client: mailbox client to request the mailbox channel
+ */
+struct k3_m4_rproc {
+ struct device *dev;
+ struct k3_m4_rproc_mem *mem;
+ int num_mems;
+ struct k3_m4_rproc_mem *rmem;
+ int num_rmems;
+ struct reset_control *reset;
+ struct ti_sci_proc *tsp;
+ const struct ti_sci_handle *ti_sci;
+ u32 ti_sci_id;
+ struct mbox_chan *mbox;
+ struct mbox_client client;
+};
+
+/**
+ * k3_m4_rproc_mbox_callback() - inbound mailbox message handler
+ * @client: mailbox client pointer used for requesting the mailbox channel
+ * @data: mailbox payload
+ *
+ * This handler is invoked by the K3 mailbox driver whenever a mailbox
+ * message is received. Usually, the mailbox payload simply contains
+ * the index of the virtqueue that is kicked by the remote processor,
+ * and we let remoteproc core handle it.
+ *
+ * In addition to virtqueue indices, we also have some out-of-band values
+ * that indicate different events. Those values are deliberately very
+ * large so they don't coincide with virtqueue indices.
+ */
+static void k3_m4_rproc_mbox_callback(struct mbox_client *client, void *data)
+{
+ struct device *dev = client->dev;
+ struct rproc *rproc = dev_get_drvdata(dev);
+ u32 msg = (u32)(uintptr_t)(data);
+
+ dev_dbg(dev, "mbox msg: 0x%x\n", msg);
+
+ switch (msg) {
+ case RP_MBOX_CRASH:
+ /*
+ * remoteproc detected an exception, but error recovery is not
+ * supported. So, just log this for now
+ */
+ dev_err(dev, "K3 rproc %s crashed\n", rproc->name);
+ break;
+ case RP_MBOX_ECHO_REPLY:
+ dev_info(dev, "received echo reply from %s\n", rproc->name);
+ break;
+ default:
+ /* silently handle all other valid messages */
+ if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
+ return;
+ if (msg > rproc->max_notifyid) {
+ dev_dbg(dev, "dropping unknown message 0x%x", msg);
+ return;
+ }
+ /* msg contains the index of the triggered vring */
+ if (rproc_vq_interrupt(rproc, msg) == IRQ_NONE)
+ dev_dbg(dev, "no message was found in vqid %d\n", msg);
+ }
+}
+
+/*
+ * Kick the remote processor to notify about pending unprocessed messages.
+ * The vqid usage is not used and is inconsequential, as the kick is performed
+ * through a simulated GPIO (a bit in an IPC interrupt-triggering register),
+ * the remote processor is expected to process both its Tx and Rx virtqueues.
+ */
+static void k3_m4_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ u32 msg = (u32)vqid;
+ int ret;
+
+ /*
+ * Send the index of the triggered virtqueue in the mailbox payload.
+ * NOTE: msg is cast to uintptr_t to prevent compiler warnings when
+ * void* is 64bit. It is safely cast back to u32 in the mailbox driver.
+ */
+ ret = mbox_send_message(kproc->mbox, (void *)(uintptr_t)msg);
+ if (ret < 0)
+ dev_err(dev, "failed to send mailbox message, status = %d\n",
+ ret);
+}
+
+static int k3_m4_rproc_ping_mbox(struct k3_m4_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ int ret;
+
+ /*
+ * Ping the remote processor, this is only for sanity-sake for now;
+ * there is no functional effect whatsoever.
+ *
+ * Note that the reply will _not_ arrive immediately: this message
+ * will wait in the mailbox fifo until the remote processor is booted.
+ */
+ ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
+ if (ret < 0) {
+ dev_err(dev, "mbox_send_message failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * The M4 cores have a local reset that affects only the CPU, and a
+ * generic module reset that powers on the device and allows the internal
+ * memories to be accessed while the local reset is asserted. This function is
+ * used to release the global reset on remote cores to allow loading into the
+ * internal RAMs. The .prepare() ops is invoked by remoteproc core before any
+ * firmware loading, and is followed by the .start() ops after loading to
+ * actually let the remote cores to run.
+ */
+static int k3_m4_rproc_prepare(struct rproc *rproc)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ /* If the core is already running, there is no need to deassert the module reset */
+ if (rproc->state == RPROC_DETACHED)
+ return 0;
+
+ /*
+ * Ensure the local reset is asserted so the core doesn't
+ * execute bogus code when the module reset is released.
+ */
+ ret = reset_control_assert(kproc->reset);
+ if (ret) {
+ dev_err(dev, "could not assert local reset\n");
+ return ret;
+ }
+
+ ret = reset_control_status(kproc->reset);
+ if (ret <= 0) {
+ dev_err(dev, "local reset still not asserted\n");
+ return ret;
+ }
+
+ ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "could not deassert module-reset for internal RAM loading\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * This function implements the .unprepare() ops and performs the complementary
+ * operations to that of the .prepare() ops. The function is used to assert the
+ * global reset on applicable cores. This completes the second portion of
+ * powering down the remote core. The cores themselves are only halted in the
+ * .stop() callback through the local reset, and the .unprepare() ops is invoked
+ * by the remoteproc core after the remoteproc is stopped to balance the global
+ * reset.
+ */
+static int k3_m4_rproc_unprepare(struct rproc *rproc)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ /* If the core is going to be detached, do not assert the module reset */
+ if (rproc->state == RPROC_ATTACHED)
+ return 0;
+
+ ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "module-reset assert failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * This function implements the .get_loaded_rsc_table() callback and is used
+ * to provide the resource table for a booted remote processor in IPC-only
+ * mode. The remote processor firmwares follow a design-by-contract approach
+ * and are expected to have the resource table at the base of the DDR region
+ * reserved for firmware usage. This provides flexibility for the remote
+ * processor to be booted by different bootloaders that may or may not have the
+ * ability to publish the resource table address and size through a DT
+ * property.
+ */
+static struct resource_table *k3_m4_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *rsc_table_sz)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+
+ if (!kproc->rmem[0].cpu_addr) {
+ dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * NOTE: The resource table size is currently hard-coded to a maximum
+ * of 256 bytes. The most common resource table usage for K3 firmwares
+ * is to only have the vdev resource entry and an optional trace entry.
+ * The exact size could be computed based on resource table address, but
+ * the hard-coded value suffices to support the IPC-only mode.
+ */
+ *rsc_table_sz = 256;
+ return (__force struct resource_table *)kproc->rmem[0].cpu_addr;
+}
+
+/*
+ * Custom function to translate a remote processor device address (internal
+ * RAMs only) to a kernel virtual address. The remote processors can access
+ * their RAMs at either an internal address visible only from a remote
+ * processor, or at the SoC-level bus address. Both these addresses need to be
+ * looked through for translation. The translated addresses can be used either
+ * by the remoteproc core for loading (when using kernel remoteproc loader), or
+ * by any rpmsg bus drivers.
+ */
+static void *k3_m4_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ void __iomem *va = NULL;
+ phys_addr_t bus_addr;
+ u32 dev_addr, offset;
+ size_t size;
+ int i;
+
+ if (len == 0)
+ return NULL;
+
+ for (i = 0; i < kproc->num_mems; i++) {
+ bus_addr = kproc->mem[i].bus_addr;
+ dev_addr = kproc->mem[i].dev_addr;
+ size = kproc->mem[i].size;
+
+ /* handle M4-view addresses */
+ if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = kproc->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+
+ /* handle SoC-view addresses */
+ if (da >= bus_addr && ((da + len) <= (bus_addr + size))) {
+ offset = da - bus_addr;
+ va = kproc->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+
+ /* handle static DDR reserved memory regions */
+ for (i = 0; i < kproc->num_rmems; i++) {
+ dev_addr = kproc->rmem[i].dev_addr;
+ size = kproc->rmem[i].size;
+
+ if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = kproc->rmem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+
+ return NULL;
+}
+
+static int k3_m4_rproc_of_get_memories(struct platform_device *pdev,
+ struct k3_m4_rproc *kproc)
+{
+ static const char * const mem_names[] = { "iram", "dram" };
+ static const u32 mem_addrs[] = { K3_M4_IRAM_DEV_ADDR, K3_M4_DRAM_DEV_ADDR };
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int num_mems;
+ int i;
+
+ num_mems = ARRAY_SIZE(mem_names);
+ kproc->mem = devm_kcalloc(kproc->dev, num_mems,
+ sizeof(*kproc->mem), GFP_KERNEL);
+ if (!kproc->mem)
+ return -ENOMEM;
+
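+ /* map the IRAM and DRAM regions and record their bus and device addresses */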
+ for (i = 0; i < num_mems; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ mem_names[i]);
+ if (!res) {
+ dev_err(dev, "found no memory resource for %s\n",
+ mem_names[i]);
+ return -EINVAL;
+ }
+ if (!devm_request_mem_region(dev, res->start,
+ resource_size(res),
+ dev_name(dev))) {
+ dev_err(dev, "could not request %s region for resource\n",
+ mem_names[i]);
+ return -EBUSY;
+ }
+
+ kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
+ resource_size(res));
+ if (!kproc->mem[i].cpu_addr) {
+ dev_err(dev, "failed to map %s memory\n",
+ mem_names[i]);
+ return -ENOMEM;
+ }
+ kproc->mem[i].bus_addr = res->start;
+ kproc->mem[i].dev_addr = mem_addrs[i];
+ kproc->mem[i].size = resource_size(res);
+
+ dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ mem_names[i], &kproc->mem[i].bus_addr,
+ kproc->mem[i].size, kproc->mem[i].cpu_addr,
+ kproc->mem[i].dev_addr);
+ }
+ kproc->num_mems = num_mems;
+
+ return 0;
+}
+
+static void k3_m4_rproc_dev_mem_release(void *data)
+{
+ struct device *dev = data;
+
+ of_reserved_mem_device_release(dev);
+}
+
+static int k3_m4_reserved_mem_init(struct k3_m4_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *rmem_np;
+ struct reserved_mem *rmem;
+ int num_rmems;
+ int ret, i;
+
+ num_rmems = of_property_count_elems_of_size(np, "memory-region",
+ sizeof(phandle));
+ if (num_rmems < 0) {
+ dev_err(dev, "device does not reserved memory regions (%d)\n",
+ num_rmems);
+ return -EINVAL;
+ }
+ if (num_rmems < 2) {
+ dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
+ num_rmems);
+ return -EINVAL;
+ }
+
+ /* use reserved memory region 0 for vring DMA allocations */
+ ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
+ if (ret) {
+ dev_err(dev, "device cannot initialize DMA pool (%d)\n", ret);
+ return ret;
+ }
+ ret = devm_add_action_or_reset(dev, k3_m4_rproc_dev_mem_release, dev);
+ if (ret)
+ return ret;
+
+ num_rmems--;
+ kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
+ if (!kproc->rmem)
+ return -ENOMEM;
+
+ /* use remaining reserved memory regions for static carveouts */
+ for (i = 0; i < num_rmems; i++) {
+ rmem_np = of_parse_phandle(np, "memory-region", i + 1);
+ if (!rmem_np)
+ return -EINVAL;
+
+ rmem = of_reserved_mem_lookup(rmem_np);
+ of_node_put(rmem_np);
+ if (!rmem)
+ return -EINVAL;
+
+ kproc->rmem[i].bus_addr = rmem->base;
+ /* 64-bit address regions currently not supported */
+ kproc->rmem[i].dev_addr = (u32)rmem->base;
+ kproc->rmem[i].size = rmem->size;
+ kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
+ if (!kproc->rmem[i].cpu_addr) {
+ dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
+ i + 1, &rmem->base, &rmem->size);
+ return -ENOMEM;
+ }
+
+ dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ i + 1, &kproc->rmem[i].bus_addr,
+ kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
+ kproc->rmem[i].dev_addr);
+ }
+ kproc->num_rmems = num_rmems;
+
+ return 0;
+}
+
+static void k3_m4_release_tsp(void *data)
+{
+ struct ti_sci_proc *tsp = data;
+
+ ti_sci_proc_release(tsp);
+}
+
+/*
+ * Power up the M4 remote processor.
+ *
+ * This function will be invoked only after the firmware for this rproc
+ * was loaded, parsed successfully, and all of its resource requirements
+ * were met. This callback is invoked only in remoteproc mode.
+ */
+static int k3_m4_rproc_start(struct rproc *rproc)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = k3_m4_rproc_ping_mbox(kproc);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(kproc->reset);
+ if (ret) {
+ dev_err(dev, "local-reset deassert failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Stop the M4 remote processor.
+ *
+ * This function puts the M4 processor into reset, and finishes processing
+ * of any pending messages. This callback is invoked only in remoteproc mode.
+ */
+static int k3_m4_rproc_stop(struct rproc *rproc)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = reset_control_assert(kproc->reset);
+ if (ret) {
+ dev_err(dev, "local-reset assert failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Attach to a running M4 remote processor (IPC-only mode)
+ *
+ * The remote processor is already booted, so there is no need to issue any
+ * TI-SCI commands to boot the M4 core. This callback is used only in IPC-only
+ * mode.
+ */
+static int k3_m4_rproc_attach(struct rproc *rproc)
+{
+ struct k3_m4_rproc *kproc = rproc->priv;
+ int ret;
+
+ ret = k3_m4_rproc_ping_mbox(kproc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Detach from a running M4 remote processor (IPC-only mode)
+ *
+ * This rproc detach callback performs the opposite operation to the attach
+ * callback; the M4 core is not stopped and is left to continue running its
+ * booted firmware. This callback is invoked only in IPC-only mode.
+ */
+static int k3_m4_rproc_detach(struct rproc *rproc)
+{
+ return 0;
+}
+
+static const struct rproc_ops k3_m4_rproc_ops = {
+ .prepare = k3_m4_rproc_prepare,
+ .unprepare = k3_m4_rproc_unprepare,
+ .start = k3_m4_rproc_start,
+ .stop = k3_m4_rproc_stop,
+ .attach = k3_m4_rproc_attach,
+ .detach = k3_m4_rproc_detach,
+ .kick = k3_m4_rproc_kick,
+ .da_to_va = k3_m4_rproc_da_to_va,
+ .get_loaded_rsc_table = k3_m4_get_loaded_rsc_table,
+};
+
+static int k3_m4_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct k3_m4_rproc *kproc;
+ struct rproc *rproc;
+ const char *fw_name;
+ bool r_state = false;
+ bool p_state = false;
+ int ret;
+
+ ret = rproc_of_parse_firmware(dev, 0, &fw_name);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to parse firmware-name property\n");
+
+ rproc = devm_rproc_alloc(dev, dev_name(dev), &k3_m4_rproc_ops, fw_name,
+ sizeof(*kproc));
+ if (!rproc)
+ return -ENOMEM;
+
+ rproc->has_iommu = false;
+ rproc->recovery_disabled = true;
+ kproc = rproc->priv;
+ kproc->dev = dev;
+ platform_set_drvdata(pdev, rproc);
+
+ kproc->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
+ if (IS_ERR(kproc->ti_sci))
+ return dev_err_probe(dev, PTR_ERR(kproc->ti_sci),
+ "failed to get ti-sci handle\n");
+
+ ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", &kproc->ti_sci_id);
+ if (ret)
+ return dev_err_probe(dev, ret, "missing 'ti,sci-dev-id' property\n");
+
+ kproc->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(kproc->reset))
+ return dev_err_probe(dev, PTR_ERR(kproc->reset), "failed to get reset\n");
+
+ kproc->tsp = ti_sci_proc_of_get_tsp(dev, kproc->ti_sci);
+ if (IS_ERR(kproc->tsp))
+ return dev_err_probe(dev, PTR_ERR(kproc->tsp),
+ "failed to construct ti-sci proc control\n");
+
+ ret = ti_sci_proc_request(kproc->tsp);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "ti_sci_proc_request failed\n");
+ ret = devm_add_action_or_reset(dev, k3_m4_release_tsp, kproc->tsp);
+ if (ret)
+ return ret;
+
+ ret = k3_m4_rproc_of_get_memories(pdev, kproc);
+ if (ret)
+ return ret;
+
+ ret = k3_m4_reserved_mem_init(kproc);
+ if (ret)
+ return dev_err_probe(dev, ret, "reserved memory init failed\n");
+
+ ret = kproc->ti_sci->ops.dev_ops.is_on(kproc->ti_sci, kproc->ti_sci_id,
+ &r_state, &p_state);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get initial state, mode cannot be determined\n");
+
+ /* configure devices for either remoteproc or IPC-only mode */
+ if (p_state) {
+ rproc->state = RPROC_DETACHED;
+ dev_info(dev, "configured M4F for IPC-only mode\n");
+ } else {
+ dev_info(dev, "configured M4F for remoteproc mode\n");
+ }
+
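+ /* set up the mailbox client and request the channel used for IPC with the M4 core */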
+ kproc->client.dev = dev;
+ kproc->client.tx_done = NULL;
+ kproc->client.rx_callback = k3_m4_rproc_mbox_callback;
+ kproc->client.tx_block = false;
+ kproc->client.knows_txdone = false;
+ kproc->mbox = mbox_request_channel(&kproc->client, 0);
+ if (IS_ERR(kproc->mbox))
+ return dev_err_probe(dev, PTR_ERR(kproc->mbox),
+ "mbox_request_channel failed\n");
+
+ ret = devm_rproc_add(dev, rproc);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to register device with remoteproc core\n");
+
+ return 0;
+}
+
+static const struct of_device_id k3_m4_of_match[] = {
+ { .compatible = "ti,am64-m4fss", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, k3_m4_of_match);
+
+static struct platform_driver k3_m4_rproc_driver = {
+ .probe = k3_m4_rproc_probe,
+ .driver = {
+ .name = "k3-m4-rproc",
+ .of_match_table = k3_m4_of_match,
+ },
+};
+module_platform_driver(k3_m4_rproc_driver);
+
+MODULE_AUTHOR("Hari Nagalla <hnagalla@ti.com>");
+MODULE_DESCRIPTION("TI K3 M4 Remoteproc driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index ad3415a3851b..dbc513c5569c 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -103,12 +103,14 @@ struct k3_r5_soc_data {
* @dev: cached device pointer
* @mode: Mode to configure the Cluster - Split or LockStep
* @cores: list of R5 cores within the cluster
+ * @core_transition: wait queue to sync core state changes
* @soc_data: SoC-specific feature data for a R5FSS
*/
struct k3_r5_cluster {
struct device *dev;
enum cluster_mode mode;
struct list_head cores;
+ wait_queue_head_t core_transition;
const struct k3_r5_soc_data *soc_data;
};
@@ -128,6 +130,7 @@ struct k3_r5_cluster {
* @atcm_enable: flag to control ATCM enablement
* @btcm_enable: flag to control BTCM enablement
* @loczrama: flag to dictate which TCM is at device address 0x0
+ * @released_from_reset: flag to signal when core is out of reset
*/
struct k3_r5_core {
struct list_head elem;
@@ -144,6 +147,7 @@ struct k3_r5_core {
u32 atcm_enable;
u32 btcm_enable;
u32 loczrama;
+ bool released_from_reset;
};
/**
@@ -190,6 +194,10 @@ static void k3_r5_rproc_mbox_callback(struct mbox_client *client, void *data)
const char *name = kproc->rproc->name;
u32 msg = omap_mbox_message(data);
+ /* Do not forward message from a detached core */
+ if (kproc->rproc->state == RPROC_DETACHED)
+ return;
+
dev_dbg(dev, "mbox msg: 0x%x\n", msg);
switch (msg) {
@@ -225,6 +233,10 @@ static void k3_r5_rproc_kick(struct rproc *rproc, int vqid)
mbox_msg_t msg = (mbox_msg_t)vqid;
int ret;
+ /* Do not forward message to a detached core */
+ if (kproc->rproc->state == RPROC_DETACHED)
+ return;
+
/* send the index of the triggered virtqueue in the mailbox payload */
ret = mbox_send_message(kproc->mbox, (void *)msg);
if (ret < 0)
@@ -395,12 +407,9 @@ static int k3_r5_rproc_request_mbox(struct rproc *rproc)
client->knows_txdone = false;
kproc->mbox = mbox_request_channel(client, 0);
- if (IS_ERR(kproc->mbox)) {
- ret = -EBUSY;
- dev_err(dev, "mbox_request_channel failed: %ld\n",
- PTR_ERR(kproc->mbox));
- return ret;
- }
+ if (IS_ERR(kproc->mbox))
+ return dev_err_probe(dev, PTR_ERR(kproc->mbox),
+ "mbox_request_channel failed\n");
/*
* Ping the remote processor, this is only for sanity-sake for now;
@@ -478,10 +487,10 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
* can be effective on all TCM addresses.
*/
dev_dbg(dev, "zeroing out ATCM memory\n");
- memset(core->mem[0].cpu_addr, 0x00, core->mem[0].size);
+ memset_io(core->mem[0].cpu_addr, 0x00, core->mem[0].size);
dev_dbg(dev, "zeroing out BTCM memory\n");
- memset(core->mem[1].cpu_addr, 0x00, core->mem[1].size);
+ memset_io(core->mem[1].cpu_addr, 0x00, core->mem[1].size);
return 0;
}
@@ -542,14 +551,10 @@ static int k3_r5_rproc_start(struct rproc *rproc)
struct k3_r5_rproc *kproc = rproc->priv;
struct k3_r5_cluster *cluster = kproc->cluster;
struct device *dev = kproc->dev;
- struct k3_r5_core *core;
+ struct k3_r5_core *core0, *core;
u32 boot_addr;
int ret;
- ret = k3_r5_rproc_request_mbox(rproc);
- if (ret)
- return ret;
-
boot_addr = rproc->bootaddr;
/* TODO: add boot_addr sanity checking */
dev_dbg(dev, "booting R5F core using boot addr = 0x%x\n", boot_addr);
@@ -558,7 +563,7 @@ static int k3_r5_rproc_start(struct rproc *rproc)
core = kproc->core;
ret = ti_sci_proc_set_config(core->tsp, boot_addr, 0, 0);
if (ret)
- goto put_mbox;
+ return ret;
/* unhalt/run all applicable cores */
if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
@@ -568,9 +573,21 @@ static int k3_r5_rproc_start(struct rproc *rproc)
goto unroll_core_run;
}
} else {
+ /* do not allow core 1 to start before core 0 */
+ core0 = list_first_entry(&cluster->cores, struct k3_r5_core,
+ elem);
+ if (core != core0 && core0->rproc->state == RPROC_OFFLINE) {
+ dev_err(dev, "%s: can not start core 1 before core 0\n",
+ __func__);
+ return -EPERM;
+ }
+
ret = k3_r5_core_run(core);
if (ret)
- goto put_mbox;
+ return ret;
+
+ core->released_from_reset = true;
+ wake_up_interruptible(&cluster->core_transition);
}
return 0;
@@ -580,8 +597,6 @@ unroll_core_run:
if (k3_r5_core_halt(core))
dev_warn(core->dev, "core halt back failed\n");
}
-put_mbox:
- mbox_free_channel(kproc->mbox);
return ret;
}
@@ -613,7 +628,8 @@ static int k3_r5_rproc_stop(struct rproc *rproc)
{
struct k3_r5_rproc *kproc = rproc->priv;
struct k3_r5_cluster *cluster = kproc->cluster;
- struct k3_r5_core *core = kproc->core;
+ struct device *dev = kproc->dev;
+ struct k3_r5_core *core1, *core = kproc->core;
int ret;
/* halt all applicable cores */
@@ -626,13 +642,21 @@ static int k3_r5_rproc_stop(struct rproc *rproc)
}
}
} else {
+ /* do not allow core 0 to stop before core 1 */
+ core1 = list_last_entry(&cluster->cores, struct k3_r5_core,
+ elem);
+ if (core != core1 && core1->rproc->state != RPROC_OFFLINE) {
+ dev_err(dev, "%s: can not stop core 0 before core 1\n",
+ __func__);
+ ret = -EPERM;
+ goto out;
+ }
+
ret = k3_r5_core_halt(core);
if (ret)
goto out;
}
- mbox_free_channel(kproc->mbox);
-
return 0;
unroll_core_halt:
@@ -647,42 +671,22 @@ out:
/*
* Attach to a running R5F remote processor (IPC-only mode)
*
- * The R5F attach callback only needs to request the mailbox, the remote
- * processor is already booted, so there is no need to issue any TI-SCI
- * commands to boot the R5F cores in IPC-only mode. This callback is invoked
- * only in IPC-only mode.
+ * The R5F attach callback is a NOP. The remote processor is already booted, and
+ * all required resources have been acquired during the probe routine, so there is
+ * no need to issue any TI-SCI commands to boot the R5F cores in IPC-only mode.
+ * This callback is invoked only in IPC-only mode and exists because
+ * rproc_validate() checks for its existence.
*/
-static int k3_r5_rproc_attach(struct rproc *rproc)
-{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
- int ret;
-
- ret = k3_r5_rproc_request_mbox(rproc);
- if (ret)
- return ret;
-
- dev_info(dev, "R5F core initialized in IPC-only mode\n");
- return 0;
-}
+static int k3_r5_rproc_attach(struct rproc *rproc) { return 0; }
/*
* Detach from a running R5F remote processor (IPC-only mode)
*
- * The R5F detach callback performs the opposite operation to attach callback
- * and only needs to release the mailbox, the R5F cores are not stopped and
- * will be left in booted state in IPC-only mode. This callback is invoked
- * only in IPC-only mode.
+ * The R5F detach callback is a NOP. The R5F cores are not stopped and will be
+ * left in the booted state in IPC-only mode. This callback is invoked only in
+ * IPC-only mode and exists for sanity's sake.
*/
-static int k3_r5_rproc_detach(struct rproc *rproc)
-{
- struct k3_r5_rproc *kproc = rproc->priv;
- struct device *dev = kproc->dev;
-
- mbox_free_channel(kproc->mbox);
- dev_info(dev, "R5F core deinitialized in IPC-only mode\n");
- return 0;
-}
+static int k3_r5_rproc_detach(struct rproc *rproc) { return 0; }
/*
* This function implements the .get_loaded_rsc_table() callback and is used
@@ -713,7 +717,7 @@ static struct resource_table *k3_r5_get_loaded_rsc_table(struct rproc *rproc,
* the hard-coded value suffices to support the IPC-only mode.
*/
*rsc_table_sz = 256;
- return (struct resource_table *)kproc->rmem[0].cpu_addr;
+ return (__force struct resource_table *)kproc->rmem[0].cpu_addr;
}
/*
@@ -951,6 +955,13 @@ out:
return ret;
}
+static void k3_r5_mem_release(void *data)
+{
+ struct device *dev = data;
+
+ of_reserved_mem_device_release(dev);
+}
+
static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
{
struct device *dev = kproc->dev;
@@ -981,28 +992,25 @@ static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
return ret;
}
+ ret = devm_add_action_or_reset(dev, k3_r5_mem_release, dev);
+ if (ret)
+ return ret;
+
num_rmems--;
- kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
- if (!kproc->rmem) {
- ret = -ENOMEM;
- goto release_rmem;
- }
+ kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
+ if (!kproc->rmem)
+ return -ENOMEM;
/* use remaining reserved memory regions for static carveouts */
for (i = 0; i < num_rmems; i++) {
rmem_np = of_parse_phandle(np, "memory-region", i + 1);
- if (!rmem_np) {
- ret = -EINVAL;
- goto unmap_rmem;
- }
+ if (!rmem_np)
+ return -EINVAL;
rmem = of_reserved_mem_lookup(rmem_np);
- if (!rmem) {
- of_node_put(rmem_np);
- ret = -EINVAL;
- goto unmap_rmem;
- }
of_node_put(rmem_np);
+ if (!rmem)
+ return -EINVAL;
kproc->rmem[i].bus_addr = rmem->base;
/*
@@ -1017,12 +1025,11 @@ static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
*/
kproc->rmem[i].dev_addr = (u32)rmem->base;
kproc->rmem[i].size = rmem->size;
- kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
+ kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
if (!kproc->rmem[i].cpu_addr) {
dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
i + 1, &rmem->base, &rmem->size);
- ret = -ENOMEM;
- goto unmap_rmem;
+ return -ENOMEM;
}
dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
@@ -1033,25 +1040,6 @@ static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
kproc->num_rmems = num_rmems;
return 0;
-
-unmap_rmem:
- for (i--; i >= 0; i--)
- iounmap(kproc->rmem[i].cpu_addr);
- kfree(kproc->rmem);
-release_rmem:
- of_reserved_mem_device_release(dev);
- return ret;
-}
-
-static void k3_r5_reserved_mem_exit(struct k3_r5_rproc *kproc)
-{
- int i;
-
- for (i = 0; i < kproc->num_rmems; i++)
- iounmap(kproc->rmem[i].cpu_addr);
- kfree(kproc->rmem);
-
- of_reserved_mem_device_release(kproc->dev);
}
/*
@@ -1117,6 +1105,7 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
u32 atcm_enable, btcm_enable, loczrama;
struct k3_r5_core *core0;
enum cluster_mode mode = cluster->mode;
+ int reset_ctrl_status;
int ret;
core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
@@ -1133,13 +1122,19 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
r_state, c_state);
}
- ret = reset_control_status(core->reset);
- if (ret < 0) {
+ reset_ctrl_status = reset_control_status(core->reset);
+ if (reset_ctrl_status < 0) {
dev_err(cdev, "failed to get initial local reset status, ret = %d\n",
- ret);
- return ret;
+ reset_ctrl_status);
+ return reset_ctrl_status;
}
+ /*
+ * Skip the waiting mechanism for sequential power-on of cores if the
+ * core has already been booted by another entity.
+ */
+ core->released_from_reset = c_state;
+
ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
&stat);
if (ret < 0) {
@@ -1166,7 +1161,7 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
* irrelevant if module reset is asserted (POR value has local reset
* deasserted), and is deemed as remoteproc mode
*/
- if (c_state && !ret && !halted) {
+ if (c_state && !reset_ctrl_status && !halted) {
dev_info(cdev, "configured R5F for IPC-only mode\n");
kproc->rproc->state = RPROC_DETACHED;
ret = 1;
@@ -1184,7 +1179,7 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
ret = 0;
} else {
dev_err(cdev, "mismatched mode: local_reset = %s, module_reset = %s, core_state = %s\n",
- !ret ? "deasserted" : "asserted",
+ !reset_ctrl_status ? "deasserted" : "asserted",
c_state ? "deasserted" : "asserted",
halted ? "halted" : "unhalted");
ret = -EINVAL;
@@ -1225,8 +1220,8 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
goto out;
}
- rproc = rproc_alloc(cdev, dev_name(cdev), &k3_r5_rproc_ops,
- fw_name, sizeof(*kproc));
+ rproc = devm_rproc_alloc(cdev, dev_name(cdev), &k3_r5_rproc_ops,
+ fw_name, sizeof(*kproc));
if (!rproc) {
ret = -ENOMEM;
goto out;
@@ -1244,9 +1239,13 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
kproc->rproc = rproc;
core->rproc = rproc;
+ ret = k3_r5_rproc_request_mbox(rproc);
+ if (ret)
+ return ret;
+
ret = k3_r5_rproc_configure_mode(kproc);
if (ret < 0)
- goto err_config;
+ goto out;
if (ret)
goto init_rmem;
@@ -1254,7 +1253,7 @@ static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
if (ret) {
dev_err(dev, "initial configure failed, ret = %d\n",
ret);
- goto err_config;
+ goto out;
}
init_rmem:
@@ -1264,13 +1263,13 @@ init_rmem:
if (ret) {
dev_err(dev, "reserved memory init failed, ret = %d\n",
ret);
- goto err_config;
+ goto out;
}
- ret = rproc_add(rproc);
+ ret = devm_rproc_add(dev, rproc);
if (ret) {
- dev_err(dev, "rproc_add failed, ret = %d\n", ret);
- goto err_add;
+ dev_err_probe(dev, ret, "rproc_add failed\n");
+ goto out;
}
/* create only one rproc in lockstep, single-cpu or
@@ -1280,6 +1279,26 @@ init_rmem:
cluster->mode == CLUSTER_MODE_SINGLECPU ||
cluster->mode == CLUSTER_MODE_SINGLECORE)
break;
+
+ /*
+ * R5 cores need to be powered on sequentially; core0 in a cluster
+ * should be in a higher power state than core1. So, wait for the
+ * current core to power up before proceeding to the next core, with
+ * a 2 sec timeout for each core.
+ *
+ * This waiting mechanism is necessary because
+ * rproc_auto_boot_callback() for core1 can be called before
+ * core0 due to thread execution order.
+ */
+ ret = wait_event_interruptible_timeout(cluster->core_transition,
+ core->released_from_reset,
+ msecs_to_jiffies(2000));
+ if (ret <= 0) {
+ dev_err(dev,
+ "Timed out waiting for %s core to power up!\n",
+ rproc->name);
+ goto out;
+ }
}
return 0;
@@ -1294,12 +1313,6 @@ err_split:
}
}
- rproc_del(rproc);
-err_add:
- k3_r5_reserved_mem_exit(kproc);
-err_config:
- rproc_free(rproc);
- core->rproc = NULL;
out:
/* undo core0 upon any failures on core1 in split-mode */
if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1) {
@@ -1341,12 +1354,7 @@ static void k3_r5_cluster_rproc_exit(void *data)
}
}
- rproc_del(rproc);
-
- k3_r5_reserved_mem_exit(kproc);
-
- rproc_free(rproc);
- core->rproc = NULL;
+ mbox_free_channel(kproc->mbox);
}
}
@@ -1479,30 +1487,11 @@ static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
return 0;
}
-static
-struct ti_sci_proc *k3_r5_core_of_get_tsp(struct device *dev,
- const struct ti_sci_handle *sci)
+static void k3_r5_release_tsp(void *data)
{
- struct ti_sci_proc *tsp;
- u32 temp[2];
- int ret;
-
- ret = of_property_read_u32_array(dev_of_node(dev), "ti,sci-proc-ids",
- temp, 2);
- if (ret < 0)
- return ERR_PTR(ret);
+ struct ti_sci_proc *tsp = data;
- tsp = devm_kzalloc(dev, sizeof(*tsp), GFP_KERNEL);
- if (!tsp)
- return ERR_PTR(-ENOMEM);
-
- tsp->dev = dev;
- tsp->sci = sci;
- tsp->ops = &sci->ops.proc_ops;
- tsp->proc_id = temp[0];
- tsp->host_id = temp[1];
-
- return tsp;
+ ti_sci_proc_release(tsp);
}
static int k3_r5_core_of_init(struct platform_device *pdev)
@@ -1552,11 +1541,7 @@ static int k3_r5_core_of_init(struct platform_device *pdev)
core->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
if (IS_ERR(core->ti_sci)) {
- ret = PTR_ERR(core->ti_sci);
- if (ret != -EPROBE_DEFER) {
- dev_err(dev, "failed to get ti-sci handle, ret = %d\n",
- ret);
- }
+ ret = dev_err_probe(dev, PTR_ERR(core->ti_sci), "failed to get ti-sci handle\n");
core->ti_sci = NULL;
goto err;
}
@@ -1572,18 +1557,14 @@ static int k3_r5_core_of_init(struct platform_device *pdev)
ret = PTR_ERR_OR_ZERO(core->reset);
if (!ret)
ret = -ENODEV;
- if (ret != -EPROBE_DEFER) {
- dev_err(dev, "failed to get reset handle, ret = %d\n",
- ret);
- }
+ dev_err_probe(dev, ret, "failed to get reset handle\n");
goto err;
}
- core->tsp = k3_r5_core_of_get_tsp(dev, core->ti_sci);
+ core->tsp = ti_sci_proc_of_get_tsp(dev, core->ti_sci);
if (IS_ERR(core->tsp)) {
- ret = PTR_ERR(core->tsp);
- dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
- ret);
+ ret = dev_err_probe(dev, PTR_ERR(core->tsp),
+ "failed to construct ti-sci proc control\n");
goto err;
}
@@ -1606,6 +1587,10 @@ static int k3_r5_core_of_init(struct platform_device *pdev)
goto err;
}
+ ret = devm_add_action_or_reset(dev, k3_r5_release_tsp, core->tsp);
+ if (ret)
+ goto err;
+
platform_set_drvdata(pdev, core);
devres_close_group(dev, k3_r5_core_of_init);
@@ -1622,13 +1607,7 @@ err:
*/
static void k3_r5_core_of_exit(struct platform_device *pdev)
{
- struct k3_r5_core *core = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
- int ret;
-
- ret = ti_sci_proc_release(core->tsp);
- if (ret)
- dev_err(dev, "failed to release proc, ret = %d\n", ret);
platform_set_drvdata(pdev, NULL);
devres_release_group(dev, k3_r5_core_of_init);
@@ -1653,16 +1632,14 @@ static int k3_r5_cluster_of_init(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev_of_node(dev);
struct platform_device *cpdev;
- struct device_node *child;
struct k3_r5_core *core;
int ret;
- for_each_available_child_of_node(np, child) {
+ for_each_available_child_of_node_scoped(np, child) {
cpdev = of_find_device_by_node(child);
if (!cpdev) {
ret = -ENODEV;
dev_err(dev, "could not get R5 core platform device\n");
- of_node_put(child);
goto fail;
}
@@ -1671,7 +1648,6 @@ static int k3_r5_cluster_of_init(struct platform_device *pdev)
dev_err(dev, "k3_r5_core_of_init failed, ret = %d\n",
ret);
put_device(&cpdev->dev);
- of_node_put(child);
goto fail;
}
@@ -1709,13 +1685,11 @@ static int k3_r5_probe(struct platform_device *pdev)
cluster->dev = dev;
cluster->soc_data = data;
INIT_LIST_HEAD(&cluster->cores);
+ init_waitqueue_head(&cluster->core_transition);
ret = of_property_read_u32(np, "ti,cluster-mode", &cluster->mode);
- if (ret < 0 && ret != -EINVAL) {
- dev_err(dev, "invalid format for ti,cluster-mode, ret = %d\n",
- ret);
- return ret;
- }
+ if (ret < 0 && ret != -EINVAL)
+ return dev_err_probe(dev, ret, "invalid format for ti,cluster-mode\n");
if (ret == -EINVAL) {
/*
@@ -1734,49 +1708,39 @@ static int k3_r5_probe(struct platform_device *pdev)
}
if ((cluster->mode == CLUSTER_MODE_SINGLECPU && !data->single_cpu_mode) ||
- (cluster->mode == CLUSTER_MODE_SINGLECORE && !data->is_single_core)) {
- dev_err(dev, "Cluster mode = %d is not supported on this SoC\n", cluster->mode);
- return -EINVAL;
- }
+ (cluster->mode == CLUSTER_MODE_SINGLECORE && !data->is_single_core))
+ return dev_err_probe(dev, -EINVAL,
+ "Cluster mode = %d is not supported on this SoC\n",
+ cluster->mode);
num_cores = of_get_available_child_count(np);
- if (num_cores != 2 && !data->is_single_core) {
- dev_err(dev, "MCU cluster requires both R5F cores to be enabled but num_cores is set to = %d\n",
- num_cores);
- return -ENODEV;
- }
+ if (num_cores != 2 && !data->is_single_core)
+ return dev_err_probe(dev, -ENODEV,
+ "MCU cluster requires both R5F cores to be enabled but num_cores is set to = %d\n",
+ num_cores);
- if (num_cores != 1 && data->is_single_core) {
- dev_err(dev, "SoC supports only single core R5 but num_cores is set to %d\n",
- num_cores);
- return -ENODEV;
- }
+ if (num_cores != 1 && data->is_single_core)
+ return dev_err_probe(dev, -ENODEV,
+ "SoC supports only single core R5 but num_cores is set to %d\n",
+ num_cores);
platform_set_drvdata(pdev, cluster);
ret = devm_of_platform_populate(dev);
- if (ret) {
- dev_err(dev, "devm_of_platform_populate failed, ret = %d\n",
- ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "devm_of_platform_populate failed\n");
ret = k3_r5_cluster_of_init(pdev);
- if (ret) {
- dev_err(dev, "k3_r5_cluster_of_init failed, ret = %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "k3_r5_cluster_of_init failed\n");
ret = devm_add_action_or_reset(dev, k3_r5_cluster_of_exit, pdev);
if (ret)
return ret;
ret = k3_r5_cluster_rproc_init(pdev);
- if (ret) {
- dev_err(dev, "k3_r5_cluster_rproc_init failed, ret = %d\n",
- ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "k3_r5_cluster_rproc_init failed\n");
ret = devm_add_action_or_reset(dev, k3_r5_cluster_rproc_exit, pdev);
if (ret)
diff --git a/drivers/remoteproc/ti_sci_proc.h b/drivers/remoteproc/ti_sci_proc.h
index 778558abcdcc..f3911ce75252 100644
--- a/drivers/remoteproc/ti_sci_proc.h
+++ b/drivers/remoteproc/ti_sci_proc.h
@@ -28,6 +28,32 @@ struct ti_sci_proc {
u8 host_id;
};
+static inline
+struct ti_sci_proc *ti_sci_proc_of_get_tsp(struct device *dev,
+ const struct ti_sci_handle *sci)
+{
+ struct ti_sci_proc *tsp;
+ u32 temp[2];
+ int ret;
+
+ ret = of_property_read_u32_array(dev_of_node(dev), "ti,sci-proc-ids",
+ temp, 2);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ tsp = devm_kzalloc(dev, sizeof(*tsp), GFP_KERNEL);
+ if (!tsp)
+ return ERR_PTR(-ENOMEM);
+
+ tsp->dev = dev;
+ tsp->sci = sci;
+ tsp->ops = &sci->ops.proc_ops;
+ tsp->proc_id = temp[0];
+ tsp->host_id = temp[1];
+
+ return tsp;
+}
+
static inline int ti_sci_proc_request(struct ti_sci_proc *tsp)
{
int ret;
diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c
index 36a55f7ffa64..d8be21e71721 100644
--- a/drivers/remoteproc/wkup_m3_rproc.c
+++ b/drivers/remoteproc/wkup_m3_rproc.c
@@ -251,7 +251,7 @@ static const struct dev_pm_ops wkup_m3_rproc_pm_ops = {
static struct platform_driver wkup_m3_rproc_driver = {
.probe = wkup_m3_rproc_probe,
- .remove_new = wkup_m3_rproc_remove,
+ .remove = wkup_m3_rproc_remove,
.driver = {
.name = "wkup_m3_rproc",
.of_match_table = wkup_m3_rproc_of_match,
diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
index 4395edea9a64..5aeedeaf3c41 100644
--- a/drivers/remoteproc/xlnx_r5_remoteproc.c
+++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
@@ -25,6 +25,10 @@
/* RX mailbox client buffer max length */
#define MBOX_CLIENT_BUF_MAX (IPI_BUF_LEN_MAX + \
sizeof(struct zynqmp_ipi_message))
+
+#define RSC_TBL_XLNX_MAGIC ((uint32_t)'x' << 24 | (uint32_t)'a' << 16 | \
+ (uint32_t)'m' << 8 | (uint32_t)'p')
+
/*
* settings for RPU cluster mode which
* reflects possible values of xlnx,cluster-mode dt-property
@@ -53,6 +57,17 @@ struct mem_bank_data {
};
/**
+ * struct zynqmp_sram_bank - sram bank description
+ *
+ * @sram_res: sram address region information
+ * @da: device address of sram
+ */
+struct zynqmp_sram_bank {
+ struct resource sram_res;
+ u32 da;
+};
+
+/**
* struct mbox_info
*
* @rx_mc_buf: to copy data from mailbox rx channel
@@ -73,9 +88,29 @@ struct mbox_info {
struct mbox_chan *rx_chan;
};
+/**
+ * struct rsc_tbl_data
+ *
+ * Platform-specific data structure used to sync the resource table address.
+ * It's important to maintain the order and size of each field on the remote side.
+ *
+ * @version: version of data structure
+ * @magic_num: 32-bit magic number.
+ * @comp_magic_num: complement of above magic number
+ * @rsc_tbl_size: resource table size
+ * @rsc_tbl: resource table address
+ */
+struct rsc_tbl_data {
+ const int version;
+ const u32 magic_num;
+ const u32 comp_magic_num;
+ const u32 rsc_tbl_size;
+ const uintptr_t rsc_tbl;
+} __packed;
+
/*
- * Hardcoded TCM bank values. This will be removed once TCM bindings are
- * accepted for system-dt specifications and upstreamed in linux kernel
+ * Hardcoded TCM bank values. These will stay in the driver to maintain backward
+ * compatibility with device trees that do not have TCM information.
*/
static const struct mem_bank_data zynqmp_tcm_banks_split[] = {
{0xffe00000UL, 0x0, 0x10000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 64KB each */
@@ -84,31 +119,39 @@ static const struct mem_bank_data zynqmp_tcm_banks_split[] = {
{0xffeb0000UL, 0x20000, 0x10000UL, PD_R5_1_BTCM, "btcm1"},
};
-/* In lockstep mode cluster combines each 64KB TCM and makes 128KB TCM */
+/* In lockstep mode the cluster uses the 64KB TCMs from the second core as well */
static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
- {0xffe00000UL, 0x0, 0x20000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 128KB each */
- {0xffe20000UL, 0x20000, 0x20000UL, PD_R5_0_BTCM, "btcm0"},
- {0, 0, 0, PD_R5_1_ATCM, ""},
- {0, 0, 0, PD_R5_1_BTCM, ""},
+ {0xffe00000UL, 0x0, 0x10000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 64KB each */
+ {0xffe20000UL, 0x20000, 0x10000UL, PD_R5_0_BTCM, "btcm0"},
+ {0xffe10000UL, 0x10000, 0x10000UL, PD_R5_1_ATCM, "atcm1"},
+ {0xffe30000UL, 0x30000, 0x10000UL, PD_R5_1_BTCM, "btcm1"},
};
/**
* struct zynqmp_r5_core
*
+ * @rsc_tbl_va: resource table virtual address
+ * @sram: Array of sram memories assigned to this core
+ * @num_sram: number of sram for this core
* @dev: device of RPU instance
* @np: device node of RPU instance
* @tcm_bank_count: number TCM banks accessible to this RPU
* @tcm_banks: array of each TCM bank data
* @rproc: rproc handle
+ * @rsc_tbl_size: resource table size retrieved from remote
* @pm_domain_id: RPU CPU power domain id
* @ipi: pointer to mailbox information
*/
struct zynqmp_r5_core {
+ void __iomem *rsc_tbl_va;
+ struct zynqmp_sram_bank *sram;
+ int num_sram;
struct device *dev;
struct device_node *np;
int tcm_bank_count;
struct mem_bank_data **tcm_banks;
struct rproc *rproc;
+ u32 rsc_tbl_size;
u32 pm_domain_id;
struct mbox_info *ipi;
};
@@ -301,36 +344,6 @@ static void zynqmp_r5_rproc_kick(struct rproc *rproc, int vqid)
}
/*
- * zynqmp_r5_set_mode()
- *
- * set RPU cluster and TCM operation mode
- *
- * @r5_core: pointer to zynqmp_r5_core type object
- * @fw_reg_val: value expected by firmware to configure RPU cluster mode
- * @tcm_mode: value expected by fw to configure TCM mode (lockstep or split)
- *
- * Return: 0 for success and < 0 for failure
- */
-static int zynqmp_r5_set_mode(struct zynqmp_r5_core *r5_core,
- enum rpu_oper_mode fw_reg_val,
- enum rpu_tcm_comb tcm_mode)
-{
- int ret;
-
- ret = zynqmp_pm_set_rpu_mode(r5_core->pm_domain_id, fw_reg_val);
- if (ret < 0) {
- dev_err(r5_core->dev, "failed to set RPU mode\n");
- return ret;
- }
-
- ret = zynqmp_pm_set_tcm_config(r5_core->pm_domain_id, tcm_mode);
- if (ret < 0)
- dev_err(r5_core->dev, "failed to configure TCM\n");
-
- return ret;
-}
-
-/*
* zynqmp_r5_rproc_start()
* @rproc: single R5 core's corresponding rproc instance
*
@@ -486,6 +499,7 @@ static int add_mem_regions_carveout(struct rproc *rproc)
}
rproc_add_carveout(rproc, rproc_mem);
+ rproc_coredump_add_segment(rproc, rmem->base, rmem->size);
dev_dbg(&rproc->dev, "reserved mem carveout %s addr=%llx, size=0x%llx",
it.node->name, rmem->base, rmem->size);
@@ -495,6 +509,45 @@ static int add_mem_regions_carveout(struct rproc *rproc)
return 0;
}
+static int add_sram_carveouts(struct rproc *rproc)
+{
+ struct zynqmp_r5_core *r5_core = rproc->priv;
+ struct rproc_mem_entry *rproc_mem;
+ struct zynqmp_sram_bank *sram;
+ dma_addr_t dma_addr;
+ size_t len;
+ int da, i;
+
+ for (i = 0; i < r5_core->num_sram; i++) {
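+ /* register each SRAM bank parsed from the optional "sram" property as a carveout */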
+ sram = &r5_core->sram[i];
+
+ dma_addr = (dma_addr_t)sram->sram_res.start;
+
+ len = resource_size(&sram->sram_res);
+ da = sram->da;
+
+ rproc_mem = rproc_mem_entry_init(&rproc->dev, NULL,
+ dma_addr,
+ len, da,
+ zynqmp_r5_mem_region_map,
+ zynqmp_r5_mem_region_unmap,
+ sram->sram_res.name);
+ if (!rproc_mem) {
+ dev_err(&rproc->dev, "failed to add sram %s da=0x%x, size=0x%lx",
+ sram->sram_res.name, da, len);
+ return -ENOMEM;
+ }
+
+ rproc_add_carveout(rproc, rproc_mem);
+ rproc_coredump_add_segment(rproc, da, len);
+
+ dev_dbg(&rproc->dev, "sram carveout %s addr=%llx, da=0x%x, size=0x%lx",
+ sram->sram_res.name, dma_addr, da, len);
+ }
+
+ return 0;
+}
+
/*
* tcm_mem_unmap()
* @rproc: single R5 core's corresponding rproc instance
@@ -540,14 +593,14 @@ static int tcm_mem_map(struct rproc *rproc,
}
/*
- * add_tcm_carveout_split_mode()
+ * add_tcm_banks()
* @rproc: single R5 core's corresponding rproc instance
*
- * allocate and add remoteproc carveout for TCM memory in split mode
+ * allocate and add remoteproc carveout for TCM memory
*
* return 0 on success, otherwise non-zero value on failure
*/
-static int add_tcm_carveout_split_mode(struct rproc *rproc)
+static int add_tcm_banks(struct rproc *rproc)
{
struct rproc_mem_entry *rproc_mem;
struct zynqmp_r5_core *r5_core;
@@ -580,90 +633,20 @@ static int add_tcm_carveout_split_mode(struct rproc *rproc)
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
if (ret < 0) {
dev_err(dev, "failed to turn on TCM 0x%x", pm_domain_id);
- goto release_tcm_split;
+ goto release_tcm;
}
- dev_dbg(dev, "TCM carveout split mode %s addr=%llx, da=0x%x, size=0x%lx",
+ dev_dbg(dev, "TCM carveout %s addr=%llx, da=0x%x, size=0x%lx",
bank_name, bank_addr, da, bank_size);
- rproc_mem = rproc_mem_entry_init(dev, NULL, bank_addr,
- bank_size, da,
- tcm_mem_map, tcm_mem_unmap,
- bank_name);
- if (!rproc_mem) {
- ret = -ENOMEM;
- zynqmp_pm_release_node(pm_domain_id);
- goto release_tcm_split;
- }
-
- rproc_add_carveout(rproc, rproc_mem);
- }
-
- return 0;
-
-release_tcm_split:
- /* If failed, Turn off all TCM banks turned on before */
- for (i--; i >= 0; i--) {
- pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
- zynqmp_pm_release_node(pm_domain_id);
- }
- return ret;
-}
-
-/*
- * add_tcm_carveout_lockstep_mode()
- * @rproc: single R5 core's corresponding rproc instance
- *
- * allocate and add remoteproc carveout for TCM memory in lockstep mode
- *
- * return 0 on success, otherwise non-zero value on failure
- */
-static int add_tcm_carveout_lockstep_mode(struct rproc *rproc)
-{
- struct rproc_mem_entry *rproc_mem;
- struct zynqmp_r5_core *r5_core;
- int i, num_banks, ret;
- phys_addr_t bank_addr;
- size_t bank_size = 0;
- struct device *dev;
- u32 pm_domain_id;
- char *bank_name;
- u32 da;
-
- r5_core = rproc->priv;
- dev = r5_core->dev;
-
- /* Go through zynqmp banks for r5 node */
- num_banks = r5_core->tcm_bank_count;
-
- /*
- * In lockstep mode, TCM is contiguous memory block
- * However, each TCM block still needs to be enabled individually.
- * So, Enable each TCM block individually.
- * Although ATCM and BTCM is contiguous memory block, add two separate
- * carveouts for both.
- */
- for (i = 0; i < num_banks; i++) {
- pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
-
- /* Turn on each TCM bank individually */
- ret = zynqmp_pm_request_node(pm_domain_id,
- ZYNQMP_PM_CAPABILITY_ACCESS, 0,
- ZYNQMP_PM_REQUEST_ACK_BLOCKING);
- if (ret < 0) {
- dev_err(dev, "failed to turn on TCM 0x%x", pm_domain_id);
- goto release_tcm_lockstep;
- }
-
- bank_size = r5_core->tcm_banks[i]->size;
- if (bank_size == 0)
+ /*
+ * In the DETACHED state the firmware is already running, so there is
+ * no need to register the TCM carveouts. However, request the TCM PD
+ * node to let the platform management firmware know that TCM is in use.
+ */
+ if (rproc->state == RPROC_DETACHED)
continue;
- bank_addr = r5_core->tcm_banks[i]->addr;
- da = r5_core->tcm_banks[i]->da;
- bank_name = r5_core->tcm_banks[i]->bank_name;
-
- /* Register TCM address range, TCM map and unmap functions */
rproc_mem = rproc_mem_entry_init(dev, NULL, bank_addr,
bank_size, da,
tcm_mem_map, tcm_mem_unmap,
@@ -671,19 +654,16 @@ static int add_tcm_carveout_lockstep_mode(struct rproc *rproc)
if (!rproc_mem) {
ret = -ENOMEM;
zynqmp_pm_release_node(pm_domain_id);
- goto release_tcm_lockstep;
+ goto release_tcm;
}
- /* If registration is success, add carveouts */
rproc_add_carveout(rproc, rproc_mem);
-
- dev_dbg(dev, "TCM carveout lockstep mode %s addr=0x%llx, da=0x%x, size=0x%lx",
- bank_name, bank_addr, da, bank_size);
+ rproc_coredump_add_segment(rproc, da, bank_size);
}
return 0;
-release_tcm_lockstep:
+release_tcm:
/* If failed, Turn off all TCM banks turned on before */
for (i--; i >= 0; i--) {
pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
@@ -693,45 +673,6 @@ release_tcm_lockstep:
}
/*
- * add_tcm_banks()
- * @rproc: single R5 core's corresponding rproc instance
- *
- * allocate and add remoteproc carveouts for TCM memory based on cluster mode
- *
- * return 0 on success, otherwise non-zero value on failure
- */
-static int add_tcm_banks(struct rproc *rproc)
-{
- struct zynqmp_r5_cluster *cluster;
- struct zynqmp_r5_core *r5_core;
- struct device *dev;
-
- r5_core = rproc->priv;
- if (!r5_core)
- return -EINVAL;
-
- dev = r5_core->dev;
-
- cluster = dev_get_drvdata(dev->parent);
- if (!cluster) {
- dev_err(dev->parent, "Invalid driver data\n");
- return -EINVAL;
- }
-
- /*
- * In lockstep mode TCM banks are one contiguous memory region of 256Kb
- * In split mode, each TCM bank is 64Kb and not contiguous.
- * We add memory carveouts accordingly.
- */
- if (cluster->mode == SPLIT_MODE)
- return add_tcm_carveout_split_mode(rproc);
- else if (cluster->mode == LOCKSTEP_MODE)
- return add_tcm_carveout_lockstep_mode(rproc);
-
- return -EINVAL;
-}
-
-/*
* zynqmp_r5_parse_fw()
* @rproc: single R5 core's corresponding rproc instance
* @fw: ptr to firmware to be loaded onto r5 core
@@ -782,6 +723,12 @@ static int zynqmp_r5_rproc_prepare(struct rproc *rproc)
return ret;
}
+ ret = add_sram_carveouts(rproc);
+ if (ret) {
+ dev_err(&rproc->dev, "failed to get sram carveout %d\n", ret);
+ return ret;
+ }
+
return 0;
}
@@ -811,6 +758,107 @@ static int zynqmp_r5_rproc_unprepare(struct rproc *rproc)
return 0;
}
+static struct resource_table *zynqmp_r5_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *size)
+{
+ struct zynqmp_r5_core *r5_core;
+
+ r5_core = rproc->priv;
+
+ *size = r5_core->rsc_tbl_size;
+
+ return (struct resource_table *)r5_core->rsc_tbl_va;
+}
+
+static int zynqmp_r5_get_rsc_table_va(struct zynqmp_r5_core *r5_core)
+{
+ struct resource_table *rsc_tbl_addr;
+ struct device *dev = r5_core->dev;
+ struct rsc_tbl_data *rsc_data_va;
+ struct resource res_mem;
+ struct device_node *np;
+ int ret;
+
+ /*
+ * The remote processor firmware is expected to provide the resource
+ * table address via the struct rsc_tbl_data data structure.
+ * The start address of the first entry under the "memory-region"
+ * property list contains that data structure, which holds the resource
+ * table address, size, and a magic number to validate the entry.
+ */
+ np = of_parse_phandle(r5_core->np, "memory-region", 0);
+ if (!np) {
+ dev_err(dev, "failed to get memory region dev node\n");
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(np, 0, &res_mem);
+ of_node_put(np);
+ if (ret) {
+ dev_err(dev, "failed to get memory-region resource addr\n");
+ return -EINVAL;
+ }
+
+ rsc_data_va = (struct rsc_tbl_data *)ioremap_wc(res_mem.start,
+ sizeof(struct rsc_tbl_data));
+ if (!rsc_data_va) {
+ dev_err(dev, "failed to map resource table data address\n");
+ return -EIO;
+ }
+
+ /*
+ * If the RSC_TBL_XLNX_MAGIC number and its complement aren't found, then
+ * do not consider the resource table address valid and don't attach.
+ */
+ if (rsc_data_va->magic_num != RSC_TBL_XLNX_MAGIC ||
+ rsc_data_va->comp_magic_num != ~RSC_TBL_XLNX_MAGIC) {
+ dev_dbg(dev, "invalid magic number, won't attach\n");
+ return -EINVAL;
+ }
+
+ r5_core->rsc_tbl_va = ioremap_wc(rsc_data_va->rsc_tbl,
+ rsc_data_va->rsc_tbl_size);
+ if (!r5_core->rsc_tbl_va) {
+ dev_err(dev, "failed to get resource table va\n");
+ return -EINVAL;
+ }
+
+ rsc_tbl_addr = (struct resource_table *)r5_core->rsc_tbl_va;
+
+ /*
+ * As of now, resource table version 1 is expected. Don't fail the attach,
+ * but warn users about it.
+ */
+ if (rsc_tbl_addr->ver != 1)
+ dev_warn(dev, "unexpected resource table version %d\n",
+ rsc_tbl_addr->ver);
+
+ r5_core->rsc_tbl_size = rsc_data_va->rsc_tbl_size;
+
+ iounmap((void __iomem *)rsc_data_va);
+
+ return 0;
+}
+
+static int zynqmp_r5_attach(struct rproc *rproc)
+{
+ dev_dbg(&rproc->dev, "rproc %d attached\n", rproc->index);
+
+ return 0;
+}
+
+static int zynqmp_r5_detach(struct rproc *rproc)
+{
+ /*
+ * Generate a last notification to the remote after clearing the virtio flag.
+ * The remote can avoid polling on the virtio reset flag if the host generates
+ * a kick during detach, and instead check the flag on the kick interrupt.
+ */
+ zynqmp_r5_rproc_kick(rproc, 0);
+
+ return 0;
+}
+
static const struct rproc_ops zynqmp_r5_rproc_ops = {
.prepare = zynqmp_r5_rproc_prepare,
.unprepare = zynqmp_r5_rproc_unprepare,
@@ -822,6 +870,9 @@ static const struct rproc_ops zynqmp_r5_rproc_ops = {
.sanity_check = rproc_elf_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
.kick = zynqmp_r5_rproc_kick,
+ .get_loaded_rsc_table = zynqmp_r5_get_loaded_rsc_table,
+ .attach = zynqmp_r5_attach,
+ .detach = zynqmp_r5_detach,
};
/**
@@ -853,6 +904,8 @@ static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
return ERR_PTR(-ENOMEM);
}
+ rproc_coredump_set_elf_info(r5_rproc, ELFCLASS32, EM_ARM);
+
r5_rproc->auto_boot = false;
r5_core = r5_rproc->priv;
r5_core->dev = cdev;
@@ -870,6 +923,16 @@ static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
goto free_rproc;
}
+ /*
+ * If firmware is already available in memory, then move the rproc state
+ * to DETACHED. Firmware can be preloaded via a debugger or by any other
+ * agent (processor) in the system.
+ * If firmware isn't available in memory and the resource table isn't
+ * found, then the rproc state remains OFFLINE.
+ */
+ if (!zynqmp_r5_get_rsc_table_va(r5_core))
+ r5_rproc->state = RPROC_DETACHED;
+
r5_core->rproc = r5_rproc;
return r5_core;
@@ -878,6 +941,174 @@ free_rproc:
return ERR_PTR(ret);
}
+static int zynqmp_r5_get_sram_banks(struct zynqmp_r5_core *r5_core)
+{
+ struct device_node *np = r5_core->np;
+ struct device *dev = r5_core->dev;
+ struct zynqmp_sram_bank *sram;
+ struct device_node *sram_np;
+ int num_sram, i, ret;
+ u64 abs_addr, size;
+
+ /* "sram" is optional property. Do not fail, if unavailable. */
+ if (!of_property_present(r5_core->np, "sram"))
+ return 0;
+
+ num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle));
+ if (num_sram <= 0) {
+ dev_err(dev, "Invalid sram property, ret = %d\n",
+ num_sram);
+ return -EINVAL;
+ }
+
+ sram = devm_kcalloc(dev, num_sram,
+ sizeof(struct zynqmp_sram_bank), GFP_KERNEL);
+ if (!sram)
+ return -ENOMEM;
+
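+ /* resolve each "sram" phandle into its physical region and device address */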
+ for (i = 0; i < num_sram; i++) {
+ sram_np = of_parse_phandle(np, "sram", i);
+ if (!sram_np) {
+ dev_err(dev, "failed to get sram %d phandle\n", i);
+ return -EINVAL;
+ }
+
+ if (!of_device_is_available(sram_np)) {
+ dev_err(dev, "sram device not available\n");
+ ret = -EINVAL;
+ goto fail_sram_get;
+ }
+
+ ret = of_address_to_resource(sram_np, 0, &sram[i].sram_res);
+ if (ret) {
+ dev_err(dev, "addr to res failed\n");
+ goto fail_sram_get;
+ }
+
+ /* Get SRAM device address */
+ ret = of_property_read_reg(sram_np, i, &abs_addr, &size);
+ if (ret) {
+ dev_err(dev, "failed to get reg property\n");
+ goto fail_sram_get;
+ }
+
+ sram[i].da = (u32)abs_addr;
+
+ of_node_put(sram_np);
+
+ dev_dbg(dev, "sram %d: name=%s, addr=0x%llx, da=0x%x, size=0x%llx\n",
+ i, sram[i].sram_res.name, sram[i].sram_res.start,
+ sram[i].da, resource_size(&sram[i].sram_res));
+ }
+
+ r5_core->sram = sram;
+ r5_core->num_sram = num_sram;
+
+ return 0;
+
+fail_sram_get:
+ of_node_put(sram_np);
+
+ return ret;
+}
+
+static int zynqmp_r5_get_tcm_node_from_dt(struct zynqmp_r5_cluster *cluster)
+{
+ int i, j, tcm_bank_count, ret, tcm_pd_idx, pd_count;
+ struct of_phandle_args out_args;
+ struct zynqmp_r5_core *r5_core;
+ struct platform_device *cpdev;
+ struct mem_bank_data *tcm;
+ struct device_node *np;
+ struct resource *res;
+ u64 abs_addr, size;
+ struct device *dev;
+
+ for (i = 0; i < cluster->core_count; i++) {
+ r5_core = cluster->r5_cores[i];
+ dev = r5_core->dev;
+ np = r5_core->np;
+
+ pd_count = of_count_phandle_with_args(np, "power-domains",
+ "#power-domain-cells");
+
+ if (pd_count <= 0) {
+ dev_err(dev, "invalid power-domains property, %d\n", pd_count);
+ return -EINVAL;
+ }
+
+ /* First entry in power-domains list is for r5 core, rest for TCM. */
+ tcm_bank_count = pd_count - 1;
+
+ if (tcm_bank_count <= 0) {
+ dev_err(dev, "invalid TCM count %d\n", tcm_bank_count);
+ return -EINVAL;
+ }
+
+ r5_core->tcm_banks = devm_kcalloc(dev, tcm_bank_count,
+ sizeof(struct mem_bank_data *),
+ GFP_KERNEL);
+ if (!r5_core->tcm_banks)
+ return -ENOMEM;
+
+ r5_core->tcm_bank_count = tcm_bank_count;
+ for (j = 0, tcm_pd_idx = 1; j < tcm_bank_count; j++, tcm_pd_idx++) {
+ tcm = devm_kzalloc(dev, sizeof(struct mem_bank_data),
+ GFP_KERNEL);
+ if (!tcm)
+ return -ENOMEM;
+
+ r5_core->tcm_banks[j] = tcm;
+
+ /* Get power-domains id of TCM. */
+ ret = of_parse_phandle_with_args(np, "power-domains",
+ "#power-domain-cells",
+ tcm_pd_idx, &out_args);
+ if (ret) {
+ dev_err(r5_core->dev,
+ "failed to get tcm %d pm domain, ret %d\n",
+ tcm_pd_idx, ret);
+ return ret;
+ }
+ tcm->pm_domain_id = out_args.args[0];
+ of_node_put(out_args.np);
+
+ /* Get TCM address without translation. */
+ ret = of_property_read_reg(np, j, &abs_addr, &size);
+ if (ret) {
+ dev_err(dev, "failed to get reg property\n");
+ return ret;
+ }
+
+ /*
+ * The remote processor can address only 32 bits, so convert
+ * the 64-bit address into 32 bits. This will discard any
+ * unwanted upper 32 bits.
+ */
+ tcm->da = (u32)abs_addr;
+ tcm->size = (u32)size;
+
+ cpdev = to_platform_device(dev);
+ res = platform_get_resource(cpdev, IORESOURCE_MEM, j);
+ if (!res) {
+ dev_err(dev, "failed to get tcm resource\n");
+ return -EINVAL;
+ }
+
+ tcm->addr = (u32)res->start;
+ tcm->bank_name = (char *)res->name;
+ res = devm_request_mem_region(dev, tcm->addr, tcm->size,
+ tcm->bank_name);
+ if (!res) {
+ dev_err(dev, "failed to request tcm resource\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
/**
* zynqmp_r5_get_tcm_node()
* Ideally this function should parse tcm node and store information
@@ -954,11 +1185,18 @@ static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster,
{
struct device *dev = cluster->dev;
struct zynqmp_r5_core *r5_core;
- int ret, i;
+ int ret = -EINVAL, i;
- ret = zynqmp_r5_get_tcm_node(cluster);
- if (ret < 0) {
- dev_err(dev, "can't get tcm node, err %d\n", ret);
+ r5_core = cluster->r5_cores[0];
+
+ /* Maintain backward compatibility for zynqmp by using hardcoded TCM addresses. */
+ if (of_property_present(r5_core->np, "reg"))
+ ret = zynqmp_r5_get_tcm_node_from_dt(cluster);
+ else if (device_is_compatible(dev, "xlnx,zynqmp-r5fss"))
+ ret = zynqmp_r5_get_tcm_node(cluster);
+
+ if (ret) {
+ dev_err(dev, "can't get tcm, err %d\n", ret);
return ret;
}
@@ -973,12 +1211,25 @@ static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster,
return ret;
}
- ret = zynqmp_r5_set_mode(r5_core, fw_reg_val, tcm_mode);
- if (ret) {
- dev_err(dev, "failed to set r5 cluster mode %d, err %d\n",
- cluster->mode, ret);
+ ret = zynqmp_pm_set_rpu_mode(r5_core->pm_domain_id, fw_reg_val);
+ if (ret < 0) {
+ dev_err(r5_core->dev, "failed to set RPU mode\n");
return ret;
}
+
+ if (of_property_present(dev_of_node(dev), "xlnx,tcm-mode") ||
+ device_is_compatible(dev, "xlnx,zynqmp-r5fss")) {
+ ret = zynqmp_pm_set_tcm_config(r5_core->pm_domain_id,
+ tcm_mode);
+ if (ret < 0) {
+ dev_err(r5_core->dev, "failed to configure TCM\n");
+ return ret;
+ }
+ }
+
+ ret = zynqmp_r5_get_sram_banks(r5_core);
+ if (ret)
+ return ret;
}
return 0;
@@ -1023,16 +1274,27 @@ static int zynqmp_r5_cluster_init(struct zynqmp_r5_cluster *cluster)
* fail driver probe if either of that is not set in dts.
*/
if (cluster_mode == LOCKSTEP_MODE) {
- tcm_mode = PM_RPU_TCM_COMB;
fw_reg_val = PM_RPU_MODE_LOCKSTEP;
} else if (cluster_mode == SPLIT_MODE) {
- tcm_mode = PM_RPU_TCM_SPLIT;
fw_reg_val = PM_RPU_MODE_SPLIT;
} else {
dev_err(dev, "driver does not support cluster mode %d\n", cluster_mode);
return -EINVAL;
}
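+ /* take TCM mode from the "xlnx,tcm-mode" DT property when present, otherwise fall back to SoC defaults */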
+ if (of_property_present(dev_node, "xlnx,tcm-mode")) {
+ ret = of_property_read_u32(dev_node, "xlnx,tcm-mode", (u32 *)&tcm_mode);
+ if (ret)
+ return ret;
+ } else if (device_is_compatible(dev, "xlnx,zynqmp-r5fss")) {
+ if (cluster_mode == LOCKSTEP_MODE)
+ tcm_mode = PM_RPU_TCM_COMB;
+ else
+ tcm_mode = PM_RPU_TCM_SPLIT;
+ } else {
+ tcm_mode = PM_RPU_TCM_COMB;
+ }
+
/*
* Number of cores is decided by number of child nodes of
* r5f subsystem node in dts. If Split mode is used in dts
@@ -1157,6 +1419,7 @@ static void zynqmp_r5_cluster_exit(void *data)
for (i = 0; i < cluster->core_count; i++) {
r5_core = cluster->r5_cores[i];
zynqmp_r5_free_mbox(r5_core->ipi);
+ iounmap(r5_core->rsc_tbl_va);
of_reserved_mem_device_release(r5_core->dev);
put_device(r5_core->dev);
rproc_del(r5_core->rproc);
@@ -1216,6 +1479,8 @@ static int zynqmp_r5_remoteproc_probe(struct platform_device *pdev)
/* Match table for OF platform binding */
static const struct of_device_id zynqmp_r5_remoteproc_match[] = {
+ { .compatible = "xlnx,versal-net-r52fss", },
+ { .compatible = "xlnx,versal-r5fss", },
{ .compatible = "xlnx,zynqmp-r5fss", },
{ /* end of list */ },
};