Diffstat (limited to 'drivers/bus')
-rw-r--r--  drivers/bus/Kconfig                   |  17
-rw-r--r--  drivers/bus/Makefile                  |   1
-rw-r--r--  drivers/bus/arm-integrator-lm.c       |   1
-rw-r--r--  drivers/bus/brcmstb_gisb.c            |  26
-rw-r--r--  drivers/bus/bt1-apb.c                 |  24
-rw-r--r--  drivers/bus/bt1-axi.c                 |  23
-rw-r--r--  drivers/bus/fsl-mc/dpmcp.c            |  22
-rw-r--r--  drivers/bus/fsl-mc/dprc-driver.c      |  10
-rw-r--r--  drivers/bus/fsl-mc/dprc.c             |   4
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-allocator.c |  26
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-bus.c       |  55
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-private.h   |   8
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-uapi.c      |  11
-rw-r--r--  drivers/bus/fsl-mc/mc-io.c            |  39
-rw-r--r--  drivers/bus/fsl-mc/mc-sys.c           |   2
-rw-r--r--  drivers/bus/hisi_lpc.c                |   2
-rw-r--r--  drivers/bus/imx-weim.c                |  16
-rw-r--r--  drivers/bus/mhi/common.h              |  38
-rw-r--r--  drivers/bus/mhi/ep/main.c             |  25
-rw-r--r--  drivers/bus/mhi/ep/ring.c             |  16
-rw-r--r--  drivers/bus/mhi/host/boot.c           | 219
-rw-r--r--  drivers/bus/mhi/host/init.c           | 140
-rw-r--r--  drivers/bus/mhi/host/internal.h       |  69
-rw-r--r--  drivers/bus/mhi/host/main.c           |  70
-rw-r--r--  drivers/bus/mhi/host/pci_generic.c    | 377
-rw-r--r--  drivers/bus/mhi/host/pm.c             | 101
-rw-r--r--  drivers/bus/mhi/host/trace.h          | 283
-rw-r--r--  drivers/bus/mips_cdmm.c               |   8
-rw-r--r--  drivers/bus/moxtet.c                  |  14
-rw-r--r--  drivers/bus/omap-ocp2scp.c            |   2
-rw-r--r--  drivers/bus/omap_l3_smx.c             |   2
-rw-r--r--  drivers/bus/qcom-ssc-block-bus.c      |  36
-rw-r--r--  drivers/bus/simple-pm-bus.c           |  24
-rw-r--r--  drivers/bus/stm32_etzpc.c             | 141
-rw-r--r--  drivers/bus/stm32_firewall.c          | 294
-rw-r--r--  drivers/bus/stm32_firewall.h          |  83
-rw-r--r--  drivers/bus/stm32_rifsc.c             | 252
-rw-r--r--  drivers/bus/sun50i-de2.c              |   2
-rw-r--r--  drivers/bus/sunxi-rsb.c               |  44
-rw-r--r--  drivers/bus/tegra-aconnect.c          |   2
-rw-r--r--  drivers/bus/tegra-gmi.c               |   2
-rw-r--r--  drivers/bus/ti-pwmss.c                |   2
-rw-r--r--  drivers/bus/ti-sysc.c                 | 248
-rw-r--r--  drivers/bus/ts-nbus.c                 |  83
-rw-r--r--  drivers/bus/vexpress-config.c         |   1
45 files changed, 2108 insertions(+), 757 deletions(-)
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index e6742998f372..ff669a8ccad9 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -89,7 +89,7 @@ config HISILICON_LPC
config IMX_WEIM
bool "Freescale EIM DRIVER"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
help
Driver for i.MX WEIM controller.
The WEIM(Wireless External Interface Module) works like a bus.
@@ -163,6 +163,16 @@ config QCOM_SSC_BLOCK_BUS
i2c/spi/uart controllers, a hexagon core, and a clock controller
which provides clocks for the above.
+config STM32_FIREWALL
+ bool "STM32 Firewall framework"
+ depends on (ARCH_STM32 || COMPILE_TEST) && OF
+ select OF_DYNAMIC
+ help
+ Say y to enable STM32 firewall framework and its services. Firewall
+ controllers will be able to register to the framework. Access for
+ hardware resources linked to a firewall controller can be requested
+ through this STM32 framework.
+
config SUN50I_DE2_BUS
bool "Allwinner A64 DE2 Bus Driver"
default ARM64
@@ -186,11 +196,12 @@ config SUNXI_RSB
config TEGRA_ACONNECT
tristate "Tegra ACONNECT Bus Driver"
- depends on ARCH_TEGRA_210_SOC
+ depends on ARCH_TEGRA
depends on OF && PM
help
Driver for the Tegra ACONNECT bus which is used to interface with
- the devices inside the Audio Processing Engine (APE) for Tegra210.
+ the devices inside the Audio Processing Engine (APE) for
+ Tegra210 and later.
config TEGRA_GMI
tristate "Tegra Generic Memory Interface bus driver"
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index d90eed189a65..cddd4984d6af 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o
obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
obj-$(CONFIG_QCOM_EBI2) += qcom-ebi2.o
obj-$(CONFIG_QCOM_SSC_BLOCK_BUS) += qcom-ssc-block-bus.o
+obj-$(CONFIG_STM32_FIREWALL) += stm32_firewall.o stm32_rifsc.o stm32_etzpc.o
obj-$(CONFIG_SUN50I_DE2_BUS) += sun50i-de2.o
obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o
obj-$(CONFIG_OF) += simple-pm-bus.o
diff --git a/drivers/bus/arm-integrator-lm.c b/drivers/bus/arm-integrator-lm.c
index b715c8ab36e8..a65c79b08804 100644
--- a/drivers/bus/arm-integrator-lm.c
+++ b/drivers/bus/arm-integrator-lm.c
@@ -85,6 +85,7 @@ static int integrator_ap_lm_probe(struct platform_device *pdev)
return -ENODEV;
}
map = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
if (IS_ERR(map)) {
dev_err(dev,
"could not find Integrator/AP system controller\n");
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index b6dfe4340da2..91ef99c42344 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -96,6 +96,20 @@ static const int gisb_offsets_bcm7400[] = {
[ARB_ERR_CAP_MASTER] = 0x0d8,
};
+static const int gisb_offsets_bcm74165[] = {
+ [ARB_TIMER] = 0x008,
+ [ARB_BP_CAP_CLR] = 0x044,
+ [ARB_BP_CAP_HI_ADDR] = -1,
+ [ARB_BP_CAP_ADDR] = 0x048,
+ [ARB_BP_CAP_STATUS] = 0x058,
+ [ARB_BP_CAP_MASTER] = 0x05c,
+ [ARB_ERR_CAP_CLR] = 0x038,
+ [ARB_ERR_CAP_HI_ADDR] = -1,
+ [ARB_ERR_CAP_ADDR] = 0x020,
+ [ARB_ERR_CAP_STATUS] = 0x030,
+ [ARB_ERR_CAP_MASTER] = 0x034,
+};
+
static const int gisb_offsets_bcm7435[] = {
[ARB_TIMER] = 0x00c,
[ARB_BP_CAP_CLR] = 0x014,
@@ -381,10 +395,7 @@ static struct attribute *gisb_arb_sysfs_attrs[] = {
&dev_attr_gisb_arb_timeout.attr,
NULL,
};
-
-static struct attribute_group gisb_arb_sysfs_attr_group = {
- .attrs = gisb_arb_sysfs_attrs,
-};
+ATTRIBUTE_GROUPS(gisb_arb_sysfs);
static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
{ .compatible = "brcm,gisb-arb", .data = gisb_offsets_bcm7445 },
@@ -393,8 +404,10 @@ static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
{ .compatible = "brcm,bcm7400-gisb-arb", .data = gisb_offsets_bcm7400 },
{ .compatible = "brcm,bcm7278-gisb-arb", .data = gisb_offsets_bcm7278 },
{ .compatible = "brcm,bcm7038-gisb-arb", .data = gisb_offsets_bcm7038 },
+ { .compatible = "brcm,bcm74165-gisb-arb", .data = gisb_offsets_bcm74165 },
{ },
};
+MODULE_DEVICE_TABLE(of, brcmstb_gisb_arb_of_match);
static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
{
@@ -474,10 +487,6 @@ static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
}
}
- err = sysfs_create_group(&pdev->dev.kobj, &gisb_arb_sysfs_attr_group);
- if (err)
- return err;
-
platform_set_drvdata(pdev, gdev);
list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);
@@ -534,6 +543,7 @@ static struct platform_driver brcmstb_gisb_arb_driver = {
.name = "brcm-gisb-arb",
.of_match_table = brcmstb_gisb_arb_of_match,
.pm = &brcmstb_gisb_arb_pm_ops,
+ .dev_groups = gisb_arb_sysfs_groups,
},
};
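
The ATTRIBUTE_GROUPS() conversion above is the standard driver-core pattern: the macro expands a NULL-terminated *_attrs[] array into a *_groups[] table, and pointing the driver's .dev_groups at that table lets the core create and remove the sysfs files itself, which is why the manual sysfs_create_group() call in probe can go away. A minimal sketch of the pattern with a hypothetical "foo" driver (illustration only, not part of this patch):

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>

static ssize_t foo_timeout_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	/* fixed value purely for illustration */
	return sysfs_emit(buf, "%u\n", 1000);
}
static DEVICE_ATTR_RO(foo_timeout);

static struct attribute *foo_attrs[] = {
	&dev_attr_foo_timeout.attr,
	NULL,
};
ATTRIBUTE_GROUPS(foo);			/* generates foo_groups[] from foo_attrs[] */

static struct platform_driver foo_driver = {
	.driver = {
		.name		= "foo",
		.dev_groups	= foo_groups,	/* driver core handles the sysfs files */
	},
};
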
diff --git a/drivers/bus/bt1-apb.c b/drivers/bus/bt1-apb.c
index e97c1d1c7578..7463124b6dd9 100644
--- a/drivers/bus/bt1-apb.c
+++ b/drivers/bus/bt1-apb.c
@@ -22,7 +22,6 @@
#include <linux/clk.h>
#include <linux/reset.h>
#include <linux/time64.h>
-#include <linux/clk.h>
#include <linux/sysfs.h>
#define APB_EHB_ISR 0x00
@@ -186,34 +185,13 @@ static int bt1_apb_request_rst(struct bt1_apb *apb)
return ret;
}
-static void bt1_apb_disable_clk(void *data)
-{
- struct bt1_apb *apb = data;
-
- clk_disable_unprepare(apb->pclk);
-}
-
static int bt1_apb_request_clk(struct bt1_apb *apb)
{
- int ret;
-
- apb->pclk = devm_clk_get(apb->dev, "pclk");
+ apb->pclk = devm_clk_get_enabled(apb->dev, "pclk");
if (IS_ERR(apb->pclk))
return dev_err_probe(apb->dev, PTR_ERR(apb->pclk),
"Couldn't get APB clock descriptor\n");
- ret = clk_prepare_enable(apb->pclk);
- if (ret) {
- dev_err(apb->dev, "Couldn't enable the APB clock\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(apb->dev, bt1_apb_disable_clk, apb);
- if (ret) {
- dev_err(apb->dev, "Can't add APB EHB clocks disable action\n");
- return ret;
- }
-
apb->rate = clk_get_rate(apb->pclk);
if (!apb->rate) {
dev_err(apb->dev, "Invalid clock rate\n");
diff --git a/drivers/bus/bt1-axi.c b/drivers/bus/bt1-axi.c
index 4007e7322cf2..a5254c73bf43 100644
--- a/drivers/bus/bt1-axi.c
+++ b/drivers/bus/bt1-axi.c
@@ -146,33 +146,14 @@ static int bt1_axi_request_rst(struct bt1_axi *axi)
return ret;
}
-static void bt1_axi_disable_clk(void *data)
-{
- struct bt1_axi *axi = data;
-
- clk_disable_unprepare(axi->aclk);
-}
-
static int bt1_axi_request_clk(struct bt1_axi *axi)
{
- int ret;
-
- axi->aclk = devm_clk_get(axi->dev, "aclk");
+ axi->aclk = devm_clk_get_enabled(axi->dev, "aclk");
if (IS_ERR(axi->aclk))
return dev_err_probe(axi->dev, PTR_ERR(axi->aclk),
"Couldn't get AXI Interconnect clock\n");
- ret = clk_prepare_enable(axi->aclk);
- if (ret) {
- dev_err(axi->dev, "Couldn't enable the AXI clock\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(axi->dev, bt1_axi_disable_clk, axi);
- if (ret)
- dev_err(axi->dev, "Can't add AXI clock disable action\n");
-
- return ret;
+ return 0;
}
static int bt1_axi_request_irq(struct bt1_axi *axi)
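
Both Baikal-T1 hunks above (and the imx-weim one further down) lean on devm_clk_get_enabled(), which combines devm_clk_get() with clk_prepare_enable() and registers the matching disable/unprepare as a devres action, so the hand-rolled cleanup callbacks can be dropped. A hedged sketch of the resulting shape, with a hypothetical helper name and clock id not taken from this patch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int foo_request_clk(struct device *dev, struct clk **out)
{
	struct clk *clk;

	/*
	 * Gets, prepares and enables the clock; it is disabled and
	 * unprepared automatically when the device is unbound.
	 */
	clk = devm_clk_get_enabled(dev, "pclk");
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "Couldn't get and enable the clock\n");

	*out = clk;
	return 0;
}
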
diff --git a/drivers/bus/fsl-mc/dpmcp.c b/drivers/bus/fsl-mc/dpmcp.c
index 5fbd0dbde24a..7816c0a728ef 100644
--- a/drivers/bus/fsl-mc/dpmcp.c
+++ b/drivers/bus/fsl-mc/dpmcp.c
@@ -75,25 +75,3 @@ int dpmcp_close(struct fsl_mc_io *mc_io,
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
-
-/**
- * dpmcp_reset() - Reset the DPMCP, returns the object to initial state.
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPMCP object
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpmcp_reset(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token)
-{
- struct fsl_mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_RESET,
- cmd_flags, token);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index 4b68c84ef485..c63a7e688db6 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -22,8 +22,8 @@ struct fsl_mc_child_objs {
struct fsl_mc_obj_desc *child_array;
};
-static bool fsl_mc_device_match(struct fsl_mc_device *mc_dev,
- struct fsl_mc_obj_desc *obj_desc)
+static bool fsl_mc_device_match(const struct fsl_mc_device *mc_dev,
+ const struct fsl_mc_obj_desc *obj_desc)
{
return mc_dev->obj_desc.id == obj_desc->id &&
strcmp(mc_dev->obj_desc.type, obj_desc->type) == 0;
@@ -112,9 +112,9 @@ void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
}
EXPORT_SYMBOL_GPL(dprc_remove_devices);
-static int __fsl_mc_device_match(struct device *dev, void *data)
+static int __fsl_mc_device_match(struct device *dev, const void *data)
{
- struct fsl_mc_obj_desc *obj_desc = data;
+ const struct fsl_mc_obj_desc *obj_desc = data;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
return fsl_mc_device_match(mc_dev, obj_desc);
@@ -806,8 +806,6 @@ int dprc_cleanup(struct fsl_mc_device *mc_dev)
dev_set_msi_domain(&mc_dev->dev, NULL);
}
- fsl_mc_cleanup_all_resource_pools(mc_dev);
-
/* if this step fails we cannot go further with cleanup as there is no way of
* communicating with the firmware
*/
diff --git a/drivers/bus/fsl-mc/dprc.c b/drivers/bus/fsl-mc/dprc.c
index dd1b5c0fb7e2..38d40c09b719 100644
--- a/drivers/bus/fsl-mc/dprc.c
+++ b/drivers/bus/fsl-mc/dprc.c
@@ -489,7 +489,7 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
cmd_params->obj_id = cpu_to_le32(obj_id);
- strscpy_pad(cmd_params->obj_type, obj_type, 16);
+ strscpy(cmd_params->obj_type, obj_type);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -561,7 +561,7 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params;
cmd_params->obj_id = cpu_to_le32(obj_id);
cmd_params->region_index = region_index;
- strscpy_pad(cmd_params->obj_type, obj_type, 16);
+ strscpy(cmd_params->obj_type, obj_type);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
index b5e8c021fa1f..d2ea59471323 100644
--- a/drivers/bus/fsl-mc/fsl-mc-allocator.c
+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
@@ -555,27 +555,6 @@ void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
}
}
-static void fsl_mc_cleanup_resource_pool(struct fsl_mc_device *mc_bus_dev,
- enum fsl_mc_pool_type pool_type)
-{
- struct fsl_mc_resource *resource;
- struct fsl_mc_resource *next;
- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
- struct fsl_mc_resource_pool *res_pool =
- &mc_bus->resource_pools[pool_type];
-
- list_for_each_entry_safe(resource, next, &res_pool->free_list, node)
- devm_kfree(&mc_bus_dev->dev, resource);
-}
-
-void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
-{
- int pool_type;
-
- for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++)
- fsl_mc_cleanup_resource_pool(mc_bus_dev, pool_type);
-}
-
/*
* fsl_mc_allocator_probe - callback invoked when an allocatable device is
* being added to the system
@@ -656,8 +635,3 @@ int __init fsl_mc_allocator_driver_init(void)
{
return fsl_mc_driver_register(&fsl_mc_allocator_driver);
}
-
-void fsl_mc_allocator_driver_exit(void)
-{
- fsl_mc_driver_unregister(&fsl_mc_allocator_driver);
-}
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 78b96cd63de9..7671bd158545 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -80,11 +80,11 @@ static phys_addr_t mc_portal_base_phys_addr;
*
* Returns 1 on success, 0 otherwise.
*/
-static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
+static int fsl_mc_bus_match(struct device *dev, const struct device_driver *drv)
{
const struct fsl_mc_device_id *id;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
+ const struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
bool found = false;
/* When driver_override is set, only bind to the matching driver */
@@ -139,9 +139,9 @@ static int fsl_mc_bus_uevent(const struct device *dev, struct kobj_uevent_env *e
static int fsl_mc_dma_configure(struct device *dev)
{
+ const struct device_driver *drv = READ_ONCE(dev->driver);
struct device *dma_dev = dev;
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
u32 input_id = mc_dev->icid;
int ret;
@@ -153,7 +153,8 @@ static int fsl_mc_dma_configure(struct device *dev)
else
ret = acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id);
- if (!ret && !mc_drv->driver_managed_dma) {
+ /* @drv may not be valid when we're called from the IOMMU layer */
+ if (!ret && drv && !to_fsl_mc_driver(drv)->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
@@ -309,7 +310,7 @@ static struct attribute *fsl_mc_bus_attrs[] = {
ATTRIBUTE_GROUPS(fsl_mc_bus);
-struct bus_type fsl_mc_bus_type = {
+const struct bus_type fsl_mc_bus_type = {
.name = "fsl-mc",
.match = fsl_mc_bus_match,
.uevent = fsl_mc_bus_uevent,
@@ -320,90 +321,90 @@ struct bus_type fsl_mc_bus_type = {
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
-struct device_type fsl_mc_bus_dprc_type = {
+const struct device_type fsl_mc_bus_dprc_type = {
.name = "fsl_mc_bus_dprc"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dprc_type);
-struct device_type fsl_mc_bus_dpni_type = {
+const struct device_type fsl_mc_bus_dpni_type = {
.name = "fsl_mc_bus_dpni"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpni_type);
-struct device_type fsl_mc_bus_dpio_type = {
+const struct device_type fsl_mc_bus_dpio_type = {
.name = "fsl_mc_bus_dpio"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpio_type);
-struct device_type fsl_mc_bus_dpsw_type = {
+const struct device_type fsl_mc_bus_dpsw_type = {
.name = "fsl_mc_bus_dpsw"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpsw_type);
-struct device_type fsl_mc_bus_dpbp_type = {
+const struct device_type fsl_mc_bus_dpbp_type = {
.name = "fsl_mc_bus_dpbp"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpbp_type);
-struct device_type fsl_mc_bus_dpcon_type = {
+const struct device_type fsl_mc_bus_dpcon_type = {
.name = "fsl_mc_bus_dpcon"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpcon_type);
-struct device_type fsl_mc_bus_dpmcp_type = {
+const struct device_type fsl_mc_bus_dpmcp_type = {
.name = "fsl_mc_bus_dpmcp"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmcp_type);
-struct device_type fsl_mc_bus_dpmac_type = {
+const struct device_type fsl_mc_bus_dpmac_type = {
.name = "fsl_mc_bus_dpmac"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmac_type);
-struct device_type fsl_mc_bus_dprtc_type = {
+const struct device_type fsl_mc_bus_dprtc_type = {
.name = "fsl_mc_bus_dprtc"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dprtc_type);
-struct device_type fsl_mc_bus_dpseci_type = {
+const struct device_type fsl_mc_bus_dpseci_type = {
.name = "fsl_mc_bus_dpseci"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpseci_type);
-struct device_type fsl_mc_bus_dpdmux_type = {
+const struct device_type fsl_mc_bus_dpdmux_type = {
.name = "fsl_mc_bus_dpdmux"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmux_type);
-struct device_type fsl_mc_bus_dpdcei_type = {
+const struct device_type fsl_mc_bus_dpdcei_type = {
.name = "fsl_mc_bus_dpdcei"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdcei_type);
-struct device_type fsl_mc_bus_dpaiop_type = {
+const struct device_type fsl_mc_bus_dpaiop_type = {
.name = "fsl_mc_bus_dpaiop"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpaiop_type);
-struct device_type fsl_mc_bus_dpci_type = {
+const struct device_type fsl_mc_bus_dpci_type = {
.name = "fsl_mc_bus_dpci"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpci_type);
-struct device_type fsl_mc_bus_dpdmai_type = {
+const struct device_type fsl_mc_bus_dpdmai_type = {
.name = "fsl_mc_bus_dpdmai"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmai_type);
-struct device_type fsl_mc_bus_dpdbg_type = {
+const struct device_type fsl_mc_bus_dpdbg_type = {
.name = "fsl_mc_bus_dpdbg"
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdbg_type);
-static struct device_type *fsl_mc_get_device_type(const char *type)
+static const struct device_type *fsl_mc_get_device_type(const char *type)
{
static const struct {
- struct device_type *dev_type;
+ const struct device_type *dev_type;
const char *type;
} dev_types[] = {
{ &fsl_mc_bus_dprc_type, "dprc" },
@@ -905,8 +906,10 @@ int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
error_cleanup_dev:
kfree(mc_dev->regions);
- kfree(mc_bus);
- kfree(mc_dev);
+ if (mc_bus)
+ kfree(mc_bus);
+ else
+ kfree(mc_dev);
return error;
}
@@ -1210,7 +1213,7 @@ static struct platform_driver fsl_mc_bus_driver = {
.acpi_match_table = fsl_mc_bus_acpi_match_table,
},
.probe = fsl_mc_bus_probe,
- .remove_new = fsl_mc_bus_remove,
+ .remove = fsl_mc_bus_remove,
.shutdown = fsl_mc_bus_remove,
};
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
index b3520ea1b9f4..beed4c53533d 100644
--- a/drivers/bus/fsl-mc/fsl-mc-private.h
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -66,10 +66,6 @@ int dpmcp_close(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token);
-int dpmcp_reset(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token);
-
/*
* Data Path Resource Container (DPRC) API
*/
@@ -631,12 +627,8 @@ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
int __init fsl_mc_allocator_driver_init(void);
-void fsl_mc_allocator_driver_exit(void);
-
void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
-void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
-
int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
enum fsl_mc_pool_type pool_type,
struct fsl_mc_resource
diff --git a/drivers/bus/fsl-mc/fsl-mc-uapi.c b/drivers/bus/fsl-mc/fsl-mc-uapi.c
index 9c4c1395fcdb..823969e4159c 100644
--- a/drivers/bus/fsl-mc/fsl-mc-uapi.c
+++ b/drivers/bus/fsl-mc/fsl-mc-uapi.c
@@ -48,6 +48,7 @@ enum fsl_mc_cmd_index {
DPRC_GET_POOL,
DPRC_GET_POOL_COUNT,
DPRC_GET_CONNECTION,
+ DPRC_GET_MEM,
DPCI_GET_LINK_STATE,
DPCI_GET_PEER_ATTR,
DPAIOP_GET_SL_VERSION,
@@ -194,6 +195,12 @@ static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = {
.token = true,
.size = 32,
},
+ [DPRC_GET_MEM] = {
+ .cmdid_value = 0x16D0,
+ .cmdid_mask = 0xFFF0,
+ .token = true,
+ .size = 12,
+ },
[DPCI_GET_LINK_STATE] = {
.cmdid_value = 0x0E10,
@@ -275,13 +282,13 @@ static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = {
.size = 8,
},
[DPSW_GET_TAILDROP] = {
- .cmdid_value = 0x0A80,
+ .cmdid_value = 0x0A90,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 14,
},
[DPSW_SET_TAILDROP] = {
- .cmdid_value = 0x0A90,
+ .cmdid_value = 0x0A80,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 24,
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
index 95b10a6cf307..cd8754763f40 100644
--- a/drivers/bus/fsl-mc/mc-io.c
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -214,12 +214,19 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
if (error < 0)
goto error_cleanup_resource;
- dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
- &dpmcp_dev->dev,
- DL_FLAG_AUTOREMOVE_CONSUMER);
- if (!dpmcp_dev->consumer_link) {
- error = -EINVAL;
- goto error_cleanup_mc_io;
+ /* If the DPRC device itself tries to allocate a portal (usually for
+ * UAPI interaction), don't add a device link between them since the
+ * DPMCP device is an actual child device of the DPRC and a reverse
+ * dependency is not allowed.
+ */
+ if (mc_dev != mc_bus_dev) {
+ dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
+ &dpmcp_dev->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!dpmcp_dev->consumer_link) {
+ error = -EINVAL;
+ goto error_cleanup_mc_io;
+ }
}
*new_mc_io = mc_io;
@@ -263,23 +270,3 @@ void fsl_mc_portal_free(struct fsl_mc_io *mc_io)
dpmcp_dev->consumer_link = NULL;
}
EXPORT_SYMBOL_GPL(fsl_mc_portal_free);
-
-/**
- * fsl_mc_portal_reset - Resets the dpmcp object for a given fsl_mc_io object
- *
- * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free
- */
-int fsl_mc_portal_reset(struct fsl_mc_io *mc_io)
-{
- int error;
- struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
-
- error = dpmcp_reset(mc_io, 0, dpmcp_dev->mc_handle);
- if (error < 0) {
- dev_err(&dpmcp_dev->dev, "dpmcp_reset() failed: %d\n", error);
- return error;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(fsl_mc_portal_reset);
diff --git a/drivers/bus/fsl-mc/mc-sys.c b/drivers/bus/fsl-mc/mc-sys.c
index f2052cd0a051..b22c59d57c8f 100644
--- a/drivers/bus/fsl-mc/mc-sys.c
+++ b/drivers/bus/fsl-mc/mc-sys.c
@@ -19,7 +19,7 @@
/*
* Timeout in milliseconds to wait for the completion of an MC command
*/
-#define MC_CMD_COMPLETION_TIMEOUT_MS 500
+#define MC_CMD_COMPLETION_TIMEOUT_MS 15000
/*
* usleep_range() min and max values used to throttle down polling
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
index 09340adbacc2..53dd1573e323 100644
--- a/drivers/bus/hisi_lpc.c
+++ b/drivers/bus/hisi_lpc.c
@@ -689,6 +689,6 @@ static struct platform_driver hisi_lpc_driver = {
.acpi_match_table = hisi_lpc_acpi_match,
},
.probe = hisi_lpc_probe,
- .remove_new = hisi_lpc_remove,
+ .remove = hisi_lpc_remove,
};
builtin_platform_driver(hisi_lpc_driver);
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 6b5da73c8541..83d623d97f5f 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -120,7 +120,7 @@ static int imx_weim_gpr_setup(struct platform_device *pdev)
i++;
}
- if (i == 0 || i % 4)
+ if (i == 0)
goto err;
for (i = 0; i < ARRAY_SIZE(gprvals); i++) {
@@ -282,22 +282,18 @@ static int weim_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, priv);
/* get the clock */
- clk = devm_clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
- ret = clk_prepare_enable(clk);
- if (ret)
- return ret;
-
/* parse the device node */
ret = weim_parse_dt(pdev);
if (ret)
- clk_disable_unprepare(clk);
- else
- dev_info(&pdev->dev, "Driver registered.\n");
+ return ret;
- return ret;
+ dev_info(&pdev->dev, "Driver registered.\n");
+
+ return 0;
}
#if IS_ENABLED(CONFIG_OF_DYNAMIC)
diff --git a/drivers/bus/mhi/common.h b/drivers/bus/mhi/common.h
index f794b9c8049e..dda340aaed95 100644
--- a/drivers/bus/mhi/common.h
+++ b/drivers/bus/mhi/common.h
@@ -297,30 +297,30 @@ struct mhi_ring_element {
__le32 dword[2];
};
+#define MHI_STATE_LIST \
+ mhi_state(RESET, "RESET") \
+ mhi_state(READY, "READY") \
+ mhi_state(M0, "M0") \
+ mhi_state(M1, "M1") \
+ mhi_state(M2, "M2") \
+ mhi_state(M3, "M3") \
+ mhi_state(M3_FAST, "M3_FAST") \
+ mhi_state(BHI, "BHI") \
+ mhi_state_end(SYS_ERR, "SYS ERROR")
+
+#undef mhi_state
+#undef mhi_state_end
+
+#define mhi_state(a, b) case MHI_STATE_##a: return b;
+#define mhi_state_end(a, b) case MHI_STATE_##a: return b;
+
static inline const char *mhi_state_str(enum mhi_state state)
{
switch (state) {
- case MHI_STATE_RESET:
- return "RESET";
- case MHI_STATE_READY:
- return "READY";
- case MHI_STATE_M0:
- return "M0";
- case MHI_STATE_M1:
- return "M1";
- case MHI_STATE_M2:
- return "M2";
- case MHI_STATE_M3:
- return "M3";
- case MHI_STATE_M3_FAST:
- return "M3 FAST";
- case MHI_STATE_BHI:
- return "BHI";
- case MHI_STATE_SYS_ERR:
- return "SYS ERROR";
+ MHI_STATE_LIST
default:
return "Unknown state";
}
-};
+}
#endif /* _MHI_COMMON_H */
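
The mhi_state_str() rework above is the X-macro idiom: MHI_STATE_LIST is written once and then expanded under different definitions of mhi_state()/mhi_state_end(), here into switch cases and elsewhere (e.g. in the new trace.h) into other tables, so the state/string pairs never have to be duplicated. A standalone sketch of the idiom, using made-up states rather than the MHI ones:

#include <stdio.h>

#define COLOR_LIST		\
	color(RED,   "red")	\
	color(GREEN, "green")	\
	color_end(BLUE, "blue")

/* First expansion: build the enum. */
#define color(a, b)	COLOR_##a,
#define color_end(a, b)	COLOR_##a
enum color { COLOR_LIST };
#undef color
#undef color_end

/* Second expansion: build the matching string lookup. */
#define color(a, b)	case COLOR_##a: return b;
#define color_end(a, b)	case COLOR_##a: return b;
static const char *color_str(enum color c)
{
	switch (c) {
	COLOR_LIST
	default:
		return "unknown";
	}
}
#undef color
#undef color_end

int main(void)
{
	printf("%s\n", color_str(COLOR_GREEN));	/* prints "green" */
	return 0;
}
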
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index 65fc1d738bec..b3eafcf2a2c5 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -90,7 +90,7 @@ static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct m
struct mhi_ring_element *event;
int ret;
- event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
if (!event)
return -ENOMEM;
@@ -109,7 +109,7 @@ int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_stat
struct mhi_ring_element *event;
int ret;
- event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
if (!event)
return -ENOMEM;
@@ -127,7 +127,7 @@ int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_e
struct mhi_ring_element *event;
int ret;
- event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
if (!event)
return -ENOMEM;
@@ -146,7 +146,7 @@ static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_e
struct mhi_ring_element *event;
int ret;
- event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+ event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
if (!event)
return -ENOMEM;
@@ -438,7 +438,7 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
write_offset = len - buf_left;
- buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
+ buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL);
if (!buf_addr)
return -ENOMEM;
@@ -1149,8 +1149,9 @@ int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl)
mhi_ep_mmio_mask_interrupts(mhi_cntrl);
mhi_ep_mmio_init(mhi_cntrl);
- mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)),
- GFP_KERNEL);
+ mhi_cntrl->mhi_event = kcalloc(mhi_cntrl->event_rings,
+ sizeof(*mhi_cntrl->mhi_event),
+ GFP_KERNEL);
if (!mhi_cntrl->mhi_event)
return -ENOMEM;
@@ -1480,14 +1481,14 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el",
sizeof(struct mhi_ring_element), 0,
- SLAB_CACHE_DMA, NULL);
+ 0, NULL);
if (!mhi_cntrl->ev_ring_el_cache) {
ret = -ENOMEM;
goto err_free_cmd;
}
mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0,
- SLAB_CACHE_DMA, NULL);
+ 0, NULL);
if (!mhi_cntrl->tre_buf_cache) {
ret = -ENOMEM;
goto err_destroy_ev_ring_el_cache;
@@ -1496,7 +1497,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
mhi_cntrl->ring_item_cache = kmem_cache_create("mhi_ep_ring_item",
sizeof(struct mhi_ep_ring_item), 0,
0, NULL);
- if (!mhi_cntrl->ev_ring_el_cache) {
+ if (!mhi_cntrl->ring_item_cache) {
ret = -ENOMEM;
goto err_destroy_tre_buf_cache;
}
@@ -1693,10 +1694,10 @@ static int mhi_ep_uevent(const struct device *dev, struct kobj_uevent_env *env)
mhi_dev->name);
}
-static int mhi_ep_match(struct device *dev, struct device_driver *drv)
+static int mhi_ep_match(struct device *dev, const struct device_driver *drv)
{
struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
- struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv);
+ const struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv);
const struct mhi_device_id *id;
/*
diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c
index aeb53b2c34a8..26357ee68dee 100644
--- a/drivers/bus/mhi/ep/ring.c
+++ b/drivers/bus/mhi/ep/ring.c
@@ -131,19 +131,23 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e
}
old_offset = ring->rd_offset;
- mhi_ep_ring_inc_index(ring);
dev_dbg(dev, "Adding an element to ring at offset (%zu)\n", ring->rd_offset);
+ buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el));
+ buf_info.dev_addr = el;
+ buf_info.size = sizeof(*el);
+
+ ret = mhi_cntrl->write_sync(mhi_cntrl, &buf_info);
+ if (ret)
+ return ret;
+
+ mhi_ep_ring_inc_index(ring);
/* Update rp in ring context */
rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase);
memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64));
- buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el));
- buf_info.dev_addr = el;
- buf_info.size = sizeof(*el);
-
- return mhi_cntrl->write_sync(mhi_cntrl, &buf_info);
+ return ret;
}
void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c
index edc0ec5a0933..efa3b6dddf4d 100644
--- a/drivers/bus/mhi/host/boot.c
+++ b/drivers/bus/mhi/host/boot.c
@@ -82,9 +82,9 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
* other cores to shutdown while we're collecting RDDM buffer. After
* returning from this function, we expect the device to reset.
*
- * Normaly, we read/write pm_state only after grabbing the
+ * Normally, we read/write pm_state only after grabbing the
* pm_lock, since we're in a panic, skipping it. Also there is no
- * gurantee that this state change would take effect since
+ * guarantee that this state change would take effect since
* we're setting it w/o grabbing pm_lock
*/
mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
@@ -177,6 +177,36 @@ int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic)
}
EXPORT_SYMBOL_GPL(mhi_download_rddm_image);
+static void mhi_fw_load_error_dump(struct mhi_controller *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+ void __iomem *base = mhi_cntrl->bhi;
+ int ret, i;
+ u32 val;
+ struct {
+ char *name;
+ u32 offset;
+ } error_reg[] = {
+ { "ERROR_CODE", BHI_ERRCODE },
+ { "ERROR_DBG1", BHI_ERRDBG1 },
+ { "ERROR_DBG2", BHI_ERRDBG2 },
+ { "ERROR_DBG3", BHI_ERRDBG3 },
+ { NULL },
+ };
+
+ read_lock_bh(pm_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ for (i = 0; error_reg[i].name; i++) {
+ ret = mhi_read_reg(mhi_cntrl, base, error_reg[i].offset, &val);
+ if (ret)
+ break;
+ dev_err(dev, "Reg: %s value: 0x%x\n", error_reg[i].name, val);
+ }
+ }
+ read_unlock_bh(pm_lock);
+}
+
static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl,
const struct mhi_buf *mhi_buf)
{
@@ -226,24 +256,13 @@ static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl,
}
static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
- dma_addr_t dma_addr,
- size_t size)
+ const struct mhi_buf *mhi_buf)
{
- u32 tx_status, val, session_id;
- int i, ret;
- void __iomem *base = mhi_cntrl->bhi;
- rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
- struct {
- char *name;
- u32 offset;
- } error_reg[] = {
- { "ERROR_CODE", BHI_ERRCODE },
- { "ERROR_DBG1", BHI_ERRDBG1 },
- { "ERROR_DBG2", BHI_ERRDBG2 },
- { "ERROR_DBG3", BHI_ERRDBG3 },
- { NULL },
- };
+ rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+ void __iomem *base = mhi_cntrl->bhi;
+ u32 tx_status, session_id;
+ int ret;
read_lock_bh(pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
@@ -255,11 +274,9 @@ static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
dev_dbg(dev, "Starting image download via BHI. Session ID: %u\n",
session_id);
mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
- mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
- upper_32_bits(dma_addr));
- mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
- lower_32_bits(dma_addr));
- mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH, upper_32_bits(mhi_buf->dma_addr));
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW, lower_32_bits(mhi_buf->dma_addr));
+ mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, mhi_buf->len);
mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id);
read_unlock_bh(pm_lock);
@@ -274,18 +291,7 @@ static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
if (tx_status == BHI_STATUS_ERROR) {
dev_err(dev, "Image transfer failed\n");
- read_lock_bh(pm_lock);
- if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
- for (i = 0; error_reg[i].name; i++) {
- ret = mhi_read_reg(mhi_cntrl, base,
- error_reg[i].offset, &val);
- if (ret)
- break;
- dev_err(dev, "Reg: %s value: 0x%x\n",
- error_reg[i].name, val);
- }
- }
- read_unlock_bh(pm_lock);
+ mhi_fw_load_error_dump(mhi_cntrl);
goto invalid_pm_state;
}
@@ -296,6 +302,16 @@ invalid_pm_state:
return -EIO;
}
+static void mhi_free_bhi_buffer(struct mhi_controller *mhi_cntrl,
+ struct image_info *image_info)
+{
+ struct mhi_buf *mhi_buf = image_info->mhi_buf;
+
+ dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len, mhi_buf->buf, mhi_buf->dma_addr);
+ kfree(image_info->mhi_buf);
+ kfree(image_info);
+}
+
void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
struct image_info *image_info)
{
@@ -310,6 +326,45 @@ void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
kfree(image_info);
}
+static int mhi_alloc_bhi_buffer(struct mhi_controller *mhi_cntrl,
+ struct image_info **image_info,
+ size_t alloc_size)
+{
+ struct image_info *img_info;
+ struct mhi_buf *mhi_buf;
+
+ img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
+ if (!img_info)
+ return -ENOMEM;
+
+ /* Allocate memory for entry */
+ img_info->mhi_buf = kzalloc(sizeof(*img_info->mhi_buf), GFP_KERNEL);
+ if (!img_info->mhi_buf)
+ goto error_alloc_mhi_buf;
+
+ /* Allocate and populate vector table */
+ mhi_buf = img_info->mhi_buf;
+
+ mhi_buf->len = alloc_size;
+ mhi_buf->buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
+ &mhi_buf->dma_addr, GFP_KERNEL);
+ if (!mhi_buf->buf)
+ goto error_alloc_segment;
+
+ img_info->bhi_vec = NULL;
+ img_info->entries = 1;
+ *image_info = img_info;
+
+ return 0;
+
+error_alloc_segment:
+ kfree(mhi_buf);
+error_alloc_mhi_buf:
+ kfree(img_info);
+
+ return -ENOMEM;
+}
+
int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
struct image_info **image_info,
size_t alloc_size)
@@ -357,6 +412,7 @@ error_alloc_segment:
for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
mhi_buf->buf, mhi_buf->dma_addr);
+ kfree(img_info->mhi_buf);
error_alloc_mhi_buf:
kfree(img_info);
@@ -364,9 +420,9 @@ error_alloc_mhi_buf:
return -ENOMEM;
}
-static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
- const u8 *buf, size_t remainder,
- struct image_info *img_info)
+static void mhi_firmware_copy_bhie(struct mhi_controller *mhi_cntrl,
+ const u8 *buf, size_t remainder,
+ struct image_info *img_info)
{
size_t to_cpy;
struct mhi_buf *mhi_buf = img_info->mhi_buf;
@@ -385,17 +441,63 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
}
}
+static enum mhi_fw_load_type mhi_fw_load_type_get(const struct mhi_controller *mhi_cntrl)
+{
+ if (mhi_cntrl->fbc_download) {
+ return MHI_FW_LOAD_FBC;
+ } else {
+ if (mhi_cntrl->seg_len)
+ return MHI_FW_LOAD_BHIE;
+ else
+ return MHI_FW_LOAD_BHI;
+ }
+}
+
+static int mhi_load_image_bhi(struct mhi_controller *mhi_cntrl, const u8 *fw_data, size_t size)
+{
+ struct image_info *image;
+ int ret;
+
+ ret = mhi_alloc_bhi_buffer(mhi_cntrl, &image, size);
+ if (ret)
+ return ret;
+
+ /* Load the firmware into BHI vec table */
+ memcpy(image->mhi_buf->buf, fw_data, size);
+
+ ret = mhi_fw_load_bhi(mhi_cntrl, &image->mhi_buf[image->entries - 1]);
+ mhi_free_bhi_buffer(mhi_cntrl, image);
+
+ return ret;
+}
+
+static int mhi_load_image_bhie(struct mhi_controller *mhi_cntrl, const u8 *fw_data, size_t size)
+{
+ struct image_info *image;
+ int ret;
+
+ ret = mhi_alloc_bhie_table(mhi_cntrl, &image, size);
+ if (ret)
+ return ret;
+
+ mhi_firmware_copy_bhie(mhi_cntrl, fw_data, size, image);
+
+ ret = mhi_fw_load_bhie(mhi_cntrl, &image->mhi_buf[image->entries - 1]);
+ mhi_free_bhie_table(mhi_cntrl, image);
+
+ return ret;
+}
+
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
{
const struct firmware *firmware = NULL;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_fw_load_type fw_load_type;
enum mhi_pm_state new_state;
const char *fw_name;
const u8 *fw_data;
- void *buf;
- dma_addr_t dma_addr;
size_t size, fw_sz;
- int i, ret;
+ int ret;
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
dev_err(dev, "Device MHI is not in valid state\n");
@@ -408,15 +510,6 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
if (ret)
dev_err(dev, "Could not capture serial number via BHI\n");
- for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) {
- ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i),
- &mhi_cntrl->oem_pk_hash[i]);
- if (ret) {
- dev_err(dev, "Could not capture OEM PK HASH via BHI\n");
- break;
- }
- }
-
/* wait for ready on pass through or any other execution environment */
if (!MHI_FW_LOAD_CAPABLE(mhi_cntrl->ee))
goto fw_load_ready_state;
@@ -461,21 +554,17 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
fw_sz = firmware->size;
skip_req_fw:
- buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, &dma_addr,
- GFP_KERNEL);
- if (!buf) {
- release_firmware(firmware);
- goto error_fw_load;
- }
-
- /* Download image using BHI */
- memcpy(buf, fw_data, size);
- ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size);
- dma_free_coherent(mhi_cntrl->cntrl_dev, size, buf, dma_addr);
+ fw_load_type = mhi_fw_load_type_get(mhi_cntrl);
+ if (fw_load_type == MHI_FW_LOAD_BHIE)
+ ret = mhi_load_image_bhie(mhi_cntrl, fw_data, size);
+ else
+ ret = mhi_load_image_bhi(mhi_cntrl, fw_data, size);
/* Error or in EDL mode, we're done */
if (ret) {
- dev_err(dev, "MHI did not load image over BHI, ret: %d\n", ret);
+ dev_err(dev, "MHI did not load image over BHI%s, ret: %d\n",
+ fw_load_type == MHI_FW_LOAD_BHIE ? "e" : "",
+ ret);
release_firmware(firmware);
goto error_fw_load;
}
@@ -494,7 +583,7 @@ skip_req_fw:
* If we're doing fbc, populate vector tables while
* device transitioning into MHI READY state
*/
- if (mhi_cntrl->fbc_download) {
+ if (fw_load_type == MHI_FW_LOAD_FBC) {
ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, fw_sz);
if (ret) {
release_firmware(firmware);
@@ -502,7 +591,7 @@ skip_req_fw:
}
/* Load the firmware into BHIE vec table */
- mhi_firmware_copy(mhi_cntrl, fw_data, fw_sz, mhi_cntrl->fbc_image);
+ mhi_firmware_copy_bhie(mhi_cntrl, fw_data, fw_sz, mhi_cntrl->fbc_image);
}
release_firmware(firmware);
@@ -519,7 +608,7 @@ fw_load_ready_state:
return;
error_ready_state:
- if (mhi_cntrl->fbc_download) {
+ if (mhi_cntrl->fbc_image) {
mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
mhi_cntrl->fbc_image = NULL;
}
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
index 65ceac1837f9..13e7a55f54ff 100644
--- a/drivers/bus/mhi/host/init.c
+++ b/drivers/bus/mhi/host/init.c
@@ -20,50 +20,49 @@
#include <linux/wait.h>
#include "internal.h"
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
static DEFINE_IDA(mhi_controller_ida);
+#undef mhi_ee
+#undef mhi_ee_end
+
+#define mhi_ee(a, b) [MHI_EE_##a] = b,
+#define mhi_ee_end(a, b) [MHI_EE_##a] = b,
+
const char * const mhi_ee_str[MHI_EE_MAX] = {
- [MHI_EE_PBL] = "PRIMARY BOOTLOADER",
- [MHI_EE_SBL] = "SECONDARY BOOTLOADER",
- [MHI_EE_AMSS] = "MISSION MODE",
- [MHI_EE_RDDM] = "RAMDUMP DOWNLOAD MODE",
- [MHI_EE_WFW] = "WLAN FIRMWARE",
- [MHI_EE_PTHRU] = "PASS THROUGH",
- [MHI_EE_EDL] = "EMERGENCY DOWNLOAD",
- [MHI_EE_FP] = "FLASH PROGRAMMER",
- [MHI_EE_DISABLE_TRANSITION] = "DISABLE",
- [MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
+ MHI_EE_LIST
};
+#undef dev_st_trans
+#undef dev_st_trans_end
+
+#define dev_st_trans(a, b) [DEV_ST_TRANSITION_##a] = b,
+#define dev_st_trans_end(a, b) [DEV_ST_TRANSITION_##a] = b,
+
const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
- [DEV_ST_TRANSITION_PBL] = "PBL",
- [DEV_ST_TRANSITION_READY] = "READY",
- [DEV_ST_TRANSITION_SBL] = "SBL",
- [DEV_ST_TRANSITION_MISSION_MODE] = "MISSION MODE",
- [DEV_ST_TRANSITION_FP] = "FLASH PROGRAMMER",
- [DEV_ST_TRANSITION_SYS_ERR] = "SYS ERROR",
- [DEV_ST_TRANSITION_DISABLE] = "DISABLE",
+ DEV_ST_TRANSITION_LIST
};
+#undef ch_state_type
+#undef ch_state_type_end
+
+#define ch_state_type(a, b) [MHI_CH_STATE_TYPE_##a] = b,
+#define ch_state_type_end(a, b) [MHI_CH_STATE_TYPE_##a] = b,
+
const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = {
- [MHI_CH_STATE_TYPE_RESET] = "RESET",
- [MHI_CH_STATE_TYPE_STOP] = "STOP",
- [MHI_CH_STATE_TYPE_START] = "START",
+ MHI_CH_STATE_TYPE_LIST
};
+#undef mhi_pm_state
+#undef mhi_pm_state_end
+
+#define mhi_pm_state(a, b) [MHI_PM_STATE_##a] = b,
+#define mhi_pm_state_end(a, b) [MHI_PM_STATE_##a] = b,
+
static const char * const mhi_pm_state_str[] = {
- [MHI_PM_STATE_DISABLE] = "DISABLE",
- [MHI_PM_STATE_POR] = "POWER ON RESET",
- [MHI_PM_STATE_M0] = "M0",
- [MHI_PM_STATE_M2] = "M2",
- [MHI_PM_STATE_M3_ENTER] = "M?->M3",
- [MHI_PM_STATE_M3] = "M3",
- [MHI_PM_STATE_M3_EXIT] = "M3->M0",
- [MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error",
- [MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect",
- [MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process",
- [MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
- [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect",
+ MHI_PM_STATE_LIST
};
const char *to_mhi_pm_state_str(u32 state)
@@ -97,11 +96,19 @@ static ssize_t oem_pk_hash_show(struct device *dev,
{
struct mhi_device *mhi_dev = to_mhi_device(dev);
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
- int i, cnt = 0;
+ u32 hash_segment[MHI_MAX_OEM_PK_HASH_SEGMENTS];
+ int i, cnt = 0, ret;
+
+ for (i = 0; i < MHI_MAX_OEM_PK_HASH_SEGMENTS; i++) {
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i), &hash_segment[i]);
+ if (ret) {
+ dev_err(dev, "Could not capture OEM PK HASH\n");
+ return ret;
+ }
+ }
- for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++)
- cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n",
- i, mhi_cntrl->oem_pk_hash[i]);
+ for (i = 0; i < MHI_MAX_OEM_PK_HASH_SEGMENTS; i++)
+ cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n", i, hash_segment[i]);
return cnt;
}
@@ -120,6 +127,30 @@ static ssize_t soc_reset_store(struct device *dev,
}
static DEVICE_ATTR_WO(soc_reset);
+static ssize_t trigger_edl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = mhi_cntrl->edl_trigger(mhi_cntrl);
+ if (ret)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR_WO(trigger_edl);
+
static struct attribute *mhi_dev_attrs[] = {
&dev_attr_serial_number.attr,
&dev_attr_oem_pk_hash.attr,
@@ -510,11 +541,9 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
dev_dbg(dev, "Initializing MHI registers\n");
/* Read channel db offset */
- ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, &val);
- if (ret) {
- dev_err(dev, "Unable to read CHDBOFF register\n");
- return -EIO;
- }
+ ret = mhi_get_channel_doorbell_offset(mhi_cntrl, &val);
+ if (ret)
+ return ret;
if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) {
dev_err(dev, "CHDB offset: 0x%x is out of range: 0x%zx\n",
@@ -907,7 +936,6 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan;
struct mhi_cmd *mhi_cmd;
struct mhi_device *mhi_dev;
- u32 soc_info;
int ret, i;
if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
@@ -982,17 +1010,6 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
}
- /* Read the MHI device info */
- ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
- SOC_HW_VERSION_OFFS, &soc_info);
- if (ret)
- goto err_destroy_wq;
-
- mhi_cntrl->family_number = FIELD_GET(SOC_HW_VERSION_FAM_NUM_BMSK, soc_info);
- mhi_cntrl->device_number = FIELD_GET(SOC_HW_VERSION_DEV_NUM_BMSK, soc_info);
- mhi_cntrl->major_version = FIELD_GET(SOC_HW_VERSION_MAJOR_VER_BMSK, soc_info);
- mhi_cntrl->minor_version = FIELD_GET(SOC_HW_VERSION_MINOR_VER_BMSK, soc_info);
-
mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL);
if (mhi_cntrl->index < 0) {
ret = mhi_cntrl->index;
@@ -1023,6 +1040,12 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
if (ret)
goto err_release_dev;
+ if (mhi_cntrl->edl_trigger) {
+ ret = sysfs_create_file(&mhi_dev->dev.kobj, &dev_attr_trigger_edl.attr);
+ if (ret)
+ goto err_release_dev;
+ }
+
mhi_cntrl->mhi_dev = mhi_dev;
mhi_create_debugfs(mhi_cntrl);
@@ -1056,6 +1079,9 @@ void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
mhi_deinit_free_irq(mhi_cntrl);
mhi_destroy_debugfs(mhi_cntrl);
+ if (mhi_cntrl->edl_trigger)
+ sysfs_remove_file(&mhi_dev->dev.kobj, &dev_attr_trigger_edl.attr);
+
destroy_workqueue(mhi_cntrl->hiprio_wq);
kfree(mhi_cntrl->mhi_cmd);
kfree(mhi_cntrl->mhi_event);
@@ -1118,7 +1144,7 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
}
mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off;
- if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) {
+ if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size || mhi_cntrl->seg_len) {
ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
&bhie_off);
if (ret) {
@@ -1416,10 +1442,10 @@ static int mhi_uevent(const struct device *dev, struct kobj_uevent_env *env)
mhi_dev->name);
}
-static int mhi_match(struct device *dev, struct device_driver *drv)
+static int mhi_match(struct device *dev, const struct device_driver *drv)
{
struct mhi_device *mhi_dev = to_mhi_device(dev);
- struct mhi_driver *mhi_drv = to_mhi_driver(drv);
+ const struct mhi_driver *mhi_drv = to_mhi_driver(drv);
const struct mhi_device_id *id;
/*
@@ -1438,7 +1464,7 @@ static int mhi_match(struct device *dev, struct device_driver *drv)
return 0;
};
-struct bus_type mhi_bus_type = {
+const struct bus_type mhi_bus_type = {
.name = "mhi",
.dev_name = "mhi",
.match = mhi_match,
diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h
index 30ac415a3000..ce566f7d2e92 100644
--- a/drivers/bus/mhi/host/internal.h
+++ b/drivers/bus/mhi/host/internal.h
@@ -9,18 +9,12 @@
#include "../common.h"
-extern struct bus_type mhi_bus_type;
+extern const struct bus_type mhi_bus_type;
/* Host request register */
#define MHI_SOC_RESET_REQ_OFFSET 0xb0
#define MHI_SOC_RESET_REQ BIT(0)
-#define SOC_HW_VERSION_OFFS 0x224
-#define SOC_HW_VERSION_FAM_NUM_BMSK GENMASK(31, 28)
-#define SOC_HW_VERSION_DEV_NUM_BMSK GENMASK(27, 16)
-#define SOC_HW_VERSION_MAJOR_VER_BMSK GENMASK(15, 8)
-#define SOC_HW_VERSION_MINOR_VER_BMSK GENMASK(7, 0)
-
struct mhi_ctxt {
struct mhi_event_ctxt *er_ctxt;
struct mhi_chan_ctxt *chan_ctxt;
@@ -35,6 +29,13 @@ struct bhi_vec_entry {
u64 size;
};
+enum mhi_fw_load_type {
+ MHI_FW_LOAD_BHI, /* BHI only in PBL */
+ MHI_FW_LOAD_BHIE, /* BHIe only in PBL */
+ MHI_FW_LOAD_FBC, /* BHI in PBL followed by BHIe in SBL */
+ MHI_FW_LOAD_MAX,
+};
+
enum mhi_ch_state_type {
MHI_CH_STATE_TYPE_RESET,
MHI_CH_STATE_TYPE_STOP,
@@ -42,6 +43,11 @@ enum mhi_ch_state_type {
MHI_CH_STATE_TYPE_MAX,
};
+#define MHI_CH_STATE_TYPE_LIST \
+ ch_state_type(RESET, "RESET") \
+ ch_state_type(STOP, "STOP") \
+ ch_state_type_end(START, "START")
+
extern const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX];
#define TO_CH_STATE_TYPE_STR(state) (((state) >= MHI_CH_STATE_TYPE_MAX) ? \
"INVALID_STATE" : \
@@ -50,6 +56,18 @@ extern const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX];
#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \
mode != MHI_DB_BRST_ENABLE)
+#define MHI_EE_LIST \
+ mhi_ee(PBL, "PRIMARY BOOTLOADER") \
+ mhi_ee(SBL, "SECONDARY BOOTLOADER") \
+ mhi_ee(AMSS, "MISSION MODE") \
+ mhi_ee(RDDM, "RAMDUMP DOWNLOAD MODE")\
+ mhi_ee(WFW, "WLAN FIRMWARE") \
+ mhi_ee(PTHRU, "PASS THROUGH") \
+ mhi_ee(EDL, "EMERGENCY DOWNLOAD") \
+ mhi_ee(FP, "FLASH PROGRAMMER") \
+ mhi_ee(DISABLE_TRANSITION, "DISABLE") \
+ mhi_ee_end(NOT_SUPPORTED, "NOT SUPPORTED")
+
extern const char * const mhi_ee_str[MHI_EE_MAX];
#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \
"INVALID_EE" : mhi_ee_str[ee])
@@ -69,9 +87,20 @@ enum dev_st_transition {
DEV_ST_TRANSITION_FP,
DEV_ST_TRANSITION_SYS_ERR,
DEV_ST_TRANSITION_DISABLE,
+ DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE,
DEV_ST_TRANSITION_MAX,
};
+#define DEV_ST_TRANSITION_LIST \
+ dev_st_trans(PBL, "PBL") \
+ dev_st_trans(READY, "READY") \
+ dev_st_trans(SBL, "SBL") \
+ dev_st_trans(MISSION_MODE, "MISSION MODE") \
+ dev_st_trans(FP, "FLASH PROGRAMMER") \
+ dev_st_trans(SYS_ERR, "SYS ERROR") \
+ dev_st_trans(DISABLE, "DISABLE") \
+ dev_st_trans_end(DISABLE_DESTROY_DEVICE, "DISABLE (DESTROY DEVICE)")
+
extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX];
#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \
"INVALID_STATE" : dev_state_tran_str[state])
@@ -88,11 +117,27 @@ enum mhi_pm_state {
MHI_PM_STATE_FW_DL_ERR,
MHI_PM_STATE_SYS_ERR_DETECT,
MHI_PM_STATE_SYS_ERR_PROCESS,
+ MHI_PM_STATE_SYS_ERR_FAIL,
MHI_PM_STATE_SHUTDOWN_PROCESS,
MHI_PM_STATE_LD_ERR_FATAL_DETECT,
MHI_PM_STATE_MAX
};
+#define MHI_PM_STATE_LIST \
+ mhi_pm_state(DISABLE, "DISABLE") \
+ mhi_pm_state(POR, "POWER ON RESET") \
+ mhi_pm_state(M0, "M0") \
+ mhi_pm_state(M2, "M2") \
+ mhi_pm_state(M3_ENTER, "M?->M3") \
+ mhi_pm_state(M3, "M3") \
+ mhi_pm_state(M3_EXIT, "M3->M0") \
+ mhi_pm_state(FW_DL_ERR, "Firmware Download Error") \
+ mhi_pm_state(SYS_ERR_DETECT, "SYS ERROR Detect") \
+ mhi_pm_state(SYS_ERR_PROCESS, "SYS ERROR Process") \
+ mhi_pm_state(SYS_ERR_FAIL, "SYS ERROR Failure") \
+ mhi_pm_state(SHUTDOWN_PROCESS, "SHUTDOWN Process") \
+ mhi_pm_state_end(LD_ERR_FATAL_DETECT, "Linkdown or Error Fatal Detect")
+
#define MHI_PM_DISABLE BIT(0)
#define MHI_PM_POR BIT(1)
#define MHI_PM_M0 BIT(2)
@@ -104,14 +149,16 @@ enum mhi_pm_state {
#define MHI_PM_FW_DL_ERR BIT(7)
#define MHI_PM_SYS_ERR_DETECT BIT(8)
#define MHI_PM_SYS_ERR_PROCESS BIT(9)
-#define MHI_PM_SHUTDOWN_PROCESS BIT(10)
+#define MHI_PM_SYS_ERR_FAIL BIT(10)
+#define MHI_PM_SHUTDOWN_PROCESS BIT(11)
/* link not accessible */
-#define MHI_PM_LD_ERR_FATAL_DETECT BIT(11)
+#define MHI_PM_LD_ERR_FATAL_DETECT BIT(12)
#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
- MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR)))
+ MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS | \
+ MHI_PM_FW_DL_ERR)))
#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR)
#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT)
#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & mhi_cntrl->db_access)
@@ -215,7 +262,7 @@ struct mhi_chan {
/*
* Important: When consuming, increment tre_ring first and when
* releasing, decrement buf_ring first. If tre_ring has space, buf_ring
- * is guranteed to have space so we do not need to check both rings.
+ * is guaranteed to have space so we do not need to check both rings.
*/
struct mhi_ring buf_ring;
struct mhi_ring tre_ring;
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
index abb561db9ae1..9bb0df43ceef 100644
--- a/drivers/bus/mhi/host/main.c
+++ b/drivers/bus/mhi/host/main.c
@@ -15,6 +15,7 @@
#include <linux/skbuff.h>
#include <linux/slab.h>
#include "internal.h"
+#include "trace.h"
int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
void __iomem *base, u32 offset, u32 *out)
@@ -493,11 +494,8 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
state = mhi_get_mhi_state(mhi_cntrl);
ee = mhi_get_exec_env(mhi_cntrl);
- dev_dbg(dev, "local ee: %s state: %s device ee: %s state: %s\n",
- TO_MHI_EXEC_STR(mhi_cntrl->ee),
- mhi_state_str(mhi_cntrl->dev_state),
- TO_MHI_EXEC_STR(ee), mhi_state_str(state));
+ trace_mhi_intvec_states(mhi_cntrl, ee, state);
if (state == MHI_STATE_SYS_ERR) {
dev_dbg(dev, "System error detected\n");
pm_state = mhi_tryset_pm_state(mhi_cntrl,
@@ -838,6 +836,8 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
while (dev_rp != local_rp) {
enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
+ trace_mhi_ctrl_event(mhi_cntrl, local_rp);
+
switch (type) {
case MHI_PKT_TYPE_BW_REQ_EVENT:
{
@@ -1003,6 +1003,8 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
while (dev_rp != local_rp && event_quota > 0) {
enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
+ trace_mhi_data_event(mhi_cntrl, local_rp);
+
chan = MHI_TRE_GET_EV_CHID(local_rp);
WARN_ON(chan >= mhi_cntrl->max_chan);
@@ -1179,25 +1181,6 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
}
EXPORT_SYMBOL_GPL(mhi_queue_skb);
-int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
- struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
-{
- struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
- mhi_dev->dl_chan;
- struct mhi_buf_info buf_info = { };
-
- buf_info.p_addr = mhi_buf->dma_addr;
- buf_info.cb_buf = mhi_buf;
- buf_info.pre_mapped = true;
- buf_info.len = len;
-
- if (unlikely(mhi_chan->pre_alloc))
- return -EINVAL;
-
- return mhi_queue(mhi_dev, &buf_info, dir, mflags);
-}
-EXPORT_SYMBOL_GPL(mhi_queue_dma);
-
int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
struct mhi_buf_info *info, enum mhi_flags flags)
{
@@ -1205,11 +1188,16 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
struct mhi_ring_element *mhi_tre;
struct mhi_buf_info *buf_info;
int eot, eob, chain, bei;
- int ret;
+ int ret = 0;
/* Protect accesses for reading and incrementing WP */
write_lock_bh(&mhi_chan->lock);
+ if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
+ ret = -ENODEV;
+ goto out;
+ }
+
buf_ring = &mhi_chan->buf_ring;
tre_ring = &mhi_chan->tre_ring;
@@ -1227,10 +1215,8 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
if (!info->pre_mapped) {
ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
- if (ret) {
- write_unlock_bh(&mhi_chan->lock);
- return ret;
- }
+ if (ret)
+ goto out;
}
eob = !!(flags & MHI_EOB);
@@ -1243,13 +1229,15 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
+ trace_mhi_gen_tre(mhi_cntrl, mhi_chan, mhi_tre);
/* increment WP */
mhi_add_ring_element(mhi_cntrl, tre_ring);
mhi_add_ring_element(mhi_cntrl, buf_ring);
+out:
write_unlock_bh(&mhi_chan->lock);
- return 0;
+ return ret;
}
int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
@@ -1337,9 +1325,7 @@ static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
enum mhi_cmd_type cmd = MHI_CMD_NOP;
int ret;
- dev_dbg(dev, "%d: Updating channel state to: %s\n", mhi_chan->chan,
- TO_CH_STATE_TYPE_STR(to_state));
-
+ trace_mhi_channel_command_start(mhi_cntrl, mhi_chan, to_state, TPS("Updating"));
switch (to_state) {
case MHI_CH_STATE_TYPE_RESET:
write_lock_irq(&mhi_chan->lock);
@@ -1406,9 +1392,7 @@ static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
write_unlock_irq(&mhi_chan->lock);
}
- dev_dbg(dev, "%d: Channel state change to %s successful\n",
- mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
-
+ trace_mhi_channel_command_end(mhi_cntrl, mhi_chan, to_state, TPS("Updated"));
exit_channel_update:
mhi_cntrl->runtime_put(mhi_cntrl);
mhi_device_put(mhi_cntrl->mhi_dev);
@@ -1692,3 +1676,19 @@ void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
}
}
EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
+
+int mhi_get_channel_doorbell_offset(struct mhi_controller *mhi_cntrl, u32 *chdb_offset)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ void __iomem *base = mhi_cntrl->regs;
+ int ret;
+
+ ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, chdb_offset);
+ if (ret) {
+ dev_err(dev, "Unable to read CHDBOFF register\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mhi_get_channel_doorbell_offset);
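The new export returns the CHDBOFF value, i.e. the offset of channel doorbell 0 from the controller MMIO base, with doorbells spaced 8 bytes apart (the same stride the EDL trigger uses further below). A minimal sketch of how a caller might derive a channel's doorbell address; the helper name is illustrative, not part of the patch:

static void __iomem *mhi_chan_db_addr(struct mhi_controller *mhi_cntrl, u32 chan)
{
	u32 chdb_offset;

	if (mhi_get_channel_doorbell_offset(mhi_cntrl, &chdb_offset))
		return NULL;

	/* Channel doorbells start at CHDBOFF and are 8 bytes apart */
	return mhi_cntrl->regs + chdb_offset + (8 * chan);
}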
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index cd6cd14b3d29..589cb6722316 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -26,6 +26,10 @@
/* PCI VID definitions */
#define PCI_VENDOR_ID_THALES 0x1269
#define PCI_VENDOR_ID_QUECTEL 0x1eac
+#define PCI_VENDOR_ID_NETPRISMA 0x203e
+
+#define MHI_EDL_DB 91
+#define MHI_EDL_COOKIE 0xEDEDEDED
/**
* struct mhi_pci_dev_info - MHI PCI device specific information
@@ -33,6 +37,7 @@
* @name: name of the PCI module
* @fw: firmware path (if any)
* @edl: emergency download mode firmware path (if any)
+ * @edl_trigger: capable of triggering EDL mode in the device (if supported)
* @bar_num: PCI base address register to use for MHI MMIO register space
* @dma_data_width: DMA transfer word size (32 or 64 bits)
* @mru_default: default MRU size for MBIM network packets
@@ -44,6 +49,7 @@ struct mhi_pci_dev_info {
const char *name;
const char *fw;
const char *edl;
+ bool edl_trigger;
unsigned int bar_num;
unsigned int dma_data_width;
unsigned int mru_default;
@@ -239,6 +245,71 @@ struct mhi_pci_dev_info {
.channel = ch_num, \
}
+static const struct mhi_channel_config mhi_qcom_qdu100_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 2),
+ MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 2),
+ MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 128, 1),
+ MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 128, 1),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 3),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 3),
+ MHI_CHANNEL_CONFIG_UL(9, "QDSS", 64, 3),
+ MHI_CHANNEL_CONFIG_UL(14, "NMEA", 32, 4),
+ MHI_CHANNEL_CONFIG_DL(15, "NMEA", 32, 4),
+ MHI_CHANNEL_CONFIG_UL(16, "CSM_CTRL", 32, 4),
+ MHI_CHANNEL_CONFIG_DL(17, "CSM_CTRL", 32, 4),
+ MHI_CHANNEL_CONFIG_UL(40, "MHI_PHC", 32, 4),
+ MHI_CHANNEL_CONFIG_DL(41, "MHI_PHC", 32, 4),
+ MHI_CHANNEL_CONFIG_UL(46, "IP_SW0", 256, 5),
+ MHI_CHANNEL_CONFIG_DL(47, "IP_SW0", 256, 5),
+};
+
+static struct mhi_event_config mhi_qcom_qdu100_events[] = {
+ /* first ring is control+data ring */
+ MHI_EVENT_CONFIG_CTRL(0, 64),
+ /* SAHARA dedicated event ring */
+ MHI_EVENT_CONFIG_SW_DATA(1, 256),
+ /* Software channels dedicated event ring */
+ MHI_EVENT_CONFIG_SW_DATA(2, 64),
+ MHI_EVENT_CONFIG_SW_DATA(3, 256),
+ MHI_EVENT_CONFIG_SW_DATA(4, 256),
+ /* Software IP channels dedicated event ring */
+ MHI_EVENT_CONFIG_SW_DATA(5, 512),
+ MHI_EVENT_CONFIG_SW_DATA(6, 512),
+ MHI_EVENT_CONFIG_SW_DATA(7, 512),
+};
+
+static const struct mhi_controller_config mhi_qcom_qdu100_config = {
+ .max_channels = 128,
+ .timeout_ms = 120000,
+ .num_channels = ARRAY_SIZE(mhi_qcom_qdu100_channels),
+ .ch_cfg = mhi_qcom_qdu100_channels,
+ .num_events = ARRAY_SIZE(mhi_qcom_qdu100_events),
+ .event_cfg = mhi_qcom_qdu100_events,
+};
+
+static const struct mhi_pci_dev_info mhi_qcom_qdu100_info = {
+ .name = "qcom-qdu100",
+ .fw = "qcom/qdu100/xbl_s.melf",
+ .edl_trigger = true,
+ .config = &mhi_qcom_qdu100_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+};
+
+static const struct mhi_channel_config mhi_qcom_sa8775p_channels[] = {
+ MHI_CHANNEL_CONFIG_UL(46, "IP_SW0", 2048, 1),
+ MHI_CHANNEL_CONFIG_DL(47, "IP_SW0", 2048, 2),
+};
+
+static struct mhi_event_config mhi_qcom_sa8775p_events[] = {
+ /* first ring is control+data ring */
+ MHI_EVENT_CONFIG_CTRL(0, 64),
+ /* Software channels dedicated event ring */
+ MHI_EVENT_CONFIG_SW_DATA(1, 64),
+ MHI_EVENT_CONFIG_SW_DATA(2, 64),
+};
+
static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1),
MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1),
@@ -269,6 +340,15 @@ static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
MHI_EVENT_CONFIG_HW_DATA(5, 2048, 101)
};
+static const struct mhi_controller_config mhi_qcom_sa8775p_config = {
+ .max_channels = 128,
+ .timeout_ms = 8000,
+ .num_channels = ARRAY_SIZE(mhi_qcom_sa8775p_channels),
+ .ch_cfg = mhi_qcom_sa8775p_channels,
+ .num_events = ARRAY_SIZE(mhi_qcom_sa8775p_events),
+ .event_cfg = mhi_qcom_sa8775p_events,
+};
+
static const struct mhi_controller_config modem_qcom_v2_mhiv_config = {
.max_channels = 128,
.timeout_ms = 8000,
@@ -288,10 +368,21 @@ static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
.event_cfg = modem_qcom_v1_mhi_events,
};
+static const struct mhi_pci_dev_info mhi_qcom_sa8775p_info = {
+ .name = "qcom-sa8775p",
+ .edl_trigger = false,
+ .config = &mhi_qcom_sa8775p_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
static const struct mhi_pci_dev_info mhi_qcom_sdx75_info = {
.name = "qcom-sdx75m",
.fw = "qcom/sdx75m/xbl.elf",
.edl = "qcom/sdx75m/edl.mbn",
+ .edl_trigger = true,
.config = &modem_qcom_v2_mhiv_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -302,6 +393,7 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
.name = "qcom-sdx65m",
.fw = "qcom/sdx65m/xbl.elf",
.edl = "qcom/sdx65m/edl.mbn",
+ .edl_trigger = true,
.config = &modem_qcom_v1_mhiv_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -312,6 +404,7 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
.name = "qcom-sdx55m",
.fw = "qcom/sdx55m/sbl1.mbn",
.edl = "qcom/sdx55m/edl.mbn",
+ .edl_trigger = true,
.config = &modem_qcom_v1_mhiv_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -391,6 +484,8 @@ static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};
@@ -411,8 +506,20 @@ static const struct mhi_controller_config modem_foxconn_sdx55_config = {
.event_cfg = mhi_foxconn_sdx55_events,
};
-static const struct mhi_pci_dev_info mhi_foxconn_sdx24_info = {
- .name = "foxconn-sdx24",
+static const struct mhi_controller_config modem_foxconn_sdx72_config = {
+ .max_channels = 128,
+ .timeout_ms = 20000,
+ .ready_timeout_ms = 50000,
+ .num_channels = ARRAY_SIZE(mhi_foxconn_sdx55_channels),
+ .ch_cfg = mhi_foxconn_sdx55_channels,
+ .num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events),
+ .event_cfg = mhi_foxconn_sdx55_events,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
+ .name = "foxconn-sdx55",
+ .edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn",
+ .edl_trigger = true,
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -420,10 +527,32 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx24_info = {
.sideband_wake = false,
};
-static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
- .name = "foxconn-sdx55",
- .fw = "qcom/sdx55m/sbl1.mbn",
- .edl = "qcom/sdx55m/edl.mbn",
+static const struct mhi_pci_dev_info mhi_foxconn_t99w175_info = {
+ .name = "foxconn-t99w175",
+ .edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn",
+ .edl_trigger = true,
+ .config = &modem_foxconn_sdx55_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_dw5930e_info = {
+ .name = "foxconn-dw5930e",
+ .edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn",
+ .edl_trigger = true,
+ .config = &modem_foxconn_sdx55_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_t99w368_info = {
+ .name = "foxconn-t99w368",
+ .edl = "qcom/sdx65m/foxconn/prog_firehose_lite.elf",
+ .edl_trigger = true,
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -431,8 +560,10 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
.sideband_wake = false,
};
-static const struct mhi_pci_dev_info mhi_foxconn_sdx65_info = {
- .name = "foxconn-sdx65",
+static const struct mhi_pci_dev_info mhi_foxconn_t99w373_info = {
+ .name = "foxconn-t99w373",
+ .edl = "qcom/sdx65m/foxconn/prog_firehose_lite.elf",
+ .edl_trigger = true,
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
@@ -440,6 +571,50 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx65_info = {
.sideband_wake = false,
};
+static const struct mhi_pci_dev_info mhi_foxconn_t99w510_info = {
+ .name = "foxconn-t99w510",
+ .edl = "qcom/sdx24m/foxconn/prog_firehose_sdx24.mbn",
+ .edl_trigger = true,
+ .config = &modem_foxconn_sdx55_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_dw5932e_info = {
+ .name = "foxconn-dw5932e",
+ .edl = "qcom/sdx65m/foxconn/prog_firehose_lite.elf",
+ .edl_trigger = true,
+ .config = &modem_foxconn_sdx55_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_t99w515_info = {
+ .name = "foxconn-t99w515",
+ .edl = "qcom/sdx72m/foxconn/edl.mbn",
+ .edl_trigger = true,
+ .config = &modem_foxconn_sdx72_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
+static const struct mhi_pci_dev_info mhi_foxconn_dw5934e_info = {
+ .name = "foxconn-dw5934e",
+ .edl = "qcom/sdx72m/foxconn/edl.mbn",
+ .edl_trigger = true,
+ .config = &modem_foxconn_sdx72_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
static const struct mhi_channel_config mhi_mv3x_channels[] = {
MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0),
MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0),
@@ -538,7 +713,7 @@ static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = {
MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
};
-static struct mhi_controller_config modem_telit_fn980_hw_v1_config = {
+static const struct mhi_controller_config modem_telit_fn980_hw_v1_config = {
.max_channels = 128,
.timeout_ms = 20000,
.num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels),
@@ -598,8 +773,78 @@ static const struct mhi_pci_dev_info mhi_telit_fn990_info = {
.mru_default = 32768,
};
+static const struct mhi_pci_dev_info mhi_telit_fe990a_info = {
+ .name = "telit-fe990a",
+ .config = &modem_telit_fn990_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+ .mru_default = 32768,
+};
+
+static const struct mhi_channel_config mhi_telit_fn920c04_channels[] = {
+ MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1),
+ MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1),
+ MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 3),
+};
+
+static const struct mhi_controller_config modem_telit_fn920c04_config = {
+ .max_channels = 128,
+ .timeout_ms = 50000,
+ .num_channels = ARRAY_SIZE(mhi_telit_fn920c04_channels),
+ .ch_cfg = mhi_telit_fn920c04_channels,
+ .num_events = ARRAY_SIZE(mhi_telit_fn990_events),
+ .event_cfg = mhi_telit_fn990_events,
+};
+
+static const struct mhi_pci_dev_info mhi_telit_fn920c04_info = {
+ .name = "telit-fn920c04",
+ .config = &modem_telit_fn920c04_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+ .mru_default = 32768,
+ .edl_trigger = true,
+};
+
+static const struct mhi_pci_dev_info mhi_netprisma_lcur57_info = {
+ .name = "netprisma-lcur57",
+ .edl = "qcom/prog_firehose_sdx24.mbn",
+ .config = &modem_quectel_em1xx_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = true,
+};
+
+static const struct mhi_pci_dev_info mhi_netprisma_fcun69_info = {
+ .name = "netprisma-fcun69",
+ .edl = "qcom/prog_firehose_sdx6x.elf",
+ .config = &modem_quectel_em1xx_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = true,
+};
+
/* Keep the list sorted based on the PID. New VID should be added as the last entry */
static const struct pci_device_id mhi_pci_id_table[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0116),
+ .driver_data = (kernel_ulong_t) &mhi_qcom_sa8775p_info },
+ /* Telit FN920C04 (sdx35) */
+ {PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x011a, 0x1c5d, 0x2020),
+ .driver_data = (kernel_ulong_t) &mhi_telit_fn920c04_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, PCI_VENDOR_ID_QCOM, 0x010c),
@@ -615,13 +860,16 @@ static const struct pci_device_id mhi_pci_id_table[] = {
/* Telit FN990 */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010),
.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
- /* Telit FE990 */
+ /* Telit FE990A */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015),
- .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
+ .driver_data = (kernel_ulong_t) &mhi_telit_fe990a_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0309),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx75_info },
+ /* QDU100, x100-DU */
+ { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0601),
+ .driver_data = (kernel_ulong_t) &mhi_qcom_qdu100_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */
.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1002), /* EM160R-GL (sdx24) */
@@ -638,40 +886,49 @@ static const struct pci_device_id mhi_pci_id_table[] = {
.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
/* T99W175 (sdx55), Both for eSIM and Non-eSIM */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info },
/* DW5930e (sdx55), With eSIM, It's also T99W175 */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b0),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5930e_info },
/* DW5930e (sdx55), Non-eSIM, It's also T99W175 */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5930e_info },
/* T99W175 (sdx55), Based on Qualcomm new baseline */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info },
/* T99W175 (sdx55) */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0c3),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info },
/* T99W368 (sdx65) */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d8),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w368_info },
/* T99W373 (sdx62) */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d9),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w373_info },
/* T99W510 (sdx24), variant 1 */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f0),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w510_info },
/* T99W510 (sdx24), variant 2 */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f1),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w510_info },
/* T99W510 (sdx24), variant 3 */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f2),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w510_info },
/* DW5932e-eSIM (sdx62), With eSIM */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f5),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5932e_info },
/* DW5932e (sdx62), Non-eSIM */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f9),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5932e_info },
+ /* T99W515 (sdx72) */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe118),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w515_info },
+	/* DW5934e (sdx72), With eSIM */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe11d),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5934e_info },
+	/* DW5934e (sdx72), Non-eSIM */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe11e),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5934e_info },
/* MV31-W (Cinterion) */
{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b3),
.driver_data = (kernel_ulong_t) &mhi_mv31_info },
@@ -686,7 +943,13 @@ static const struct pci_device_id mhi_pci_id_table[] = {
.driver_data = (kernel_ulong_t) &mhi_mv32_info },
/* T99W175 (sdx55), HP variant */
{ PCI_DEVICE(0x03f0, 0x0a6c),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info },
+ /* NETPRISMA LCUR57 (SDX24) */
+ { PCI_DEVICE(PCI_VENDOR_ID_NETPRISMA, 0x1000),
+ .driver_data = (kernel_ulong_t) &mhi_netprisma_lcur57_info },
+ /* NETPRISMA FCUN69 (SDX6X) */
+ { PCI_DEVICE(PCI_VENDOR_ID_NETPRISMA, 0x1001),
+ .driver_data = (kernel_ulong_t) &mhi_netprisma_fcun69_info },
{ }
};
MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);
@@ -772,22 +1035,18 @@ static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
int err;
- err = pci_assign_resource(pdev, bar_num);
- if (err)
- return err;
-
err = pcim_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "failed to enable pci device: %d\n", err);
return err;
}
- err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev));
- if (err) {
+ mhi_cntrl->regs = pcim_iomap_region(pdev, bar_num, pci_name(pdev));
+ if (IS_ERR(mhi_cntrl->regs)) {
+ err = PTR_ERR(mhi_cntrl->regs);
dev_err(&pdev->dev, "failed to map pci region: %d\n", err);
return err;
}
- mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num];
mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num);
err = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
@@ -814,7 +1073,7 @@ static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
*/
mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events;
- nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI);
+ nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSIX | PCI_IRQ_MSI);
if (nr_vectors < 0) {
dev_err(&pdev->dev, "Error allocating MSI vectors %d\n",
nr_vectors);
@@ -872,7 +1131,7 @@ static void mhi_pci_recovery_work(struct work_struct *work)
dev_warn(&pdev->dev, "device recovery started\n");
- del_timer(&mhi_pdev->health_check_timer);
+ timer_delete(&mhi_pdev->health_check_timer);
pm_runtime_forbid(&pdev->dev);
/* Clean up MHI state */
@@ -905,13 +1164,15 @@ static void mhi_pci_recovery_work(struct work_struct *work)
err_unprepare:
mhi_unprepare_after_power_down(mhi_cntrl);
err_try_reset:
- if (pci_reset_function(pdev))
- dev_err(&pdev->dev, "Recovery failed\n");
+ err = pci_try_reset_function(pdev);
+ if (err)
+ dev_err(&pdev->dev, "Recovery failed: %d\n", err);
}
static void health_check(struct timer_list *t)
{
- struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer);
+ struct mhi_pci_device *mhi_pdev = timer_container_of(mhi_pdev, t,
+ health_check_timer);
struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
@@ -928,6 +1189,40 @@ static void health_check(struct timer_list *t)
mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
}
+static int mhi_pci_generic_edl_trigger(struct mhi_controller *mhi_cntrl)
+{
+ void __iomem *base = mhi_cntrl->regs;
+ void __iomem *edl_db;
+ int ret;
+ u32 val;
+
+ ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
+ if (ret) {
+ dev_err(mhi_cntrl->cntrl_dev, "Failed to wakeup the device\n");
+ return ret;
+ }
+
+ pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0);
+ mhi_cntrl->runtime_get(mhi_cntrl);
+
+ ret = mhi_get_channel_doorbell_offset(mhi_cntrl, &val);
+ if (ret)
+ goto err_get_chdb;
+
+ edl_db = base + val + (8 * MHI_EDL_DB);
+
+ mhi_cntrl->write_reg(mhi_cntrl, edl_db + 4, upper_32_bits(MHI_EDL_COOKIE));
+ mhi_cntrl->write_reg(mhi_cntrl, edl_db, lower_32_bits(MHI_EDL_COOKIE));
+
+ mhi_soc_reset(mhi_cntrl);
+
+err_get_chdb:
+ mhi_cntrl->runtime_put(mhi_cntrl);
+ mhi_device_put(mhi_cntrl->mhi_dev);
+
+ return ret;
+}
+
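The trigger above rings the dedicated EDL doorbell (channel 91) with the magic cookie and then resets the SoC so the device boots into emergency download mode. Since the cookie fits in 32 bits, the split write works out as follows (illustrative arithmetic only):

/*
 * upper_32_bits(MHI_EDL_COOKIE) == 0x00000000  -> written to edl_db + 4
 * lower_32_bits(MHI_EDL_COOKIE) == 0xEDEDEDED  -> written to edl_db
 * The high word goes first so the full 64-bit doorbell value is in place
 * by the time the low-word write lands.
 */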
static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data;
@@ -961,6 +1256,10 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mhi_cntrl->runtime_get = mhi_pci_runtime_get;
mhi_cntrl->runtime_put = mhi_pci_runtime_put;
mhi_cntrl->mru = info->mru_default;
+ mhi_cntrl->name = info->name;
+
+ if (info->edl_trigger)
+ mhi_cntrl->edl_trigger = mhi_pci_generic_edl_trigger;
if (info->sideband_wake) {
mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
@@ -1030,7 +1329,7 @@ static void mhi_pci_remove(struct pci_dev *pdev)
struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
- del_timer_sync(&mhi_pdev->health_check_timer);
+ timer_delete_sync(&mhi_pdev->health_check_timer);
cancel_work_sync(&mhi_pdev->recovery_work);
if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
@@ -1058,7 +1357,7 @@ static void mhi_pci_reset_prepare(struct pci_dev *pdev)
dev_info(&pdev->dev, "reset\n");
- del_timer(&mhi_pdev->health_check_timer);
+ timer_delete(&mhi_pdev->health_check_timer);
/* Clean up MHI state */
if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
@@ -1168,7 +1467,7 @@ static int __maybe_unused mhi_pci_runtime_suspend(struct device *dev)
if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
return 0;
- del_timer(&mhi_pdev->health_check_timer);
+ timer_delete(&mhi_pdev->health_check_timer);
cancel_work_sync(&mhi_pdev->recovery_work);
if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
index a2f2feef1476..33d92bf2fc3e 100644
--- a/drivers/bus/mhi/host/pm.c
+++ b/drivers/bus/mhi/host/pm.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"
+#include "trace.h"
/*
* Not all MHI state transitions are synchronous. Transitions like Linkdown,
@@ -36,7 +37,10 @@
* M0 <--> M0
* M0 -> FW_DL_ERR
* M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
- * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
+ * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS
+ * SYS_ERR_PROCESS -> SYS_ERR_FAIL
+ * SYS_ERR_FAIL -> SYS_ERR_DETECT
+ * SYS_ERR_PROCESS --> POR
* L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
* SHUTDOWN_PROCESS -> DISABLE
* L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
@@ -93,7 +97,12 @@ static const struct mhi_pm_transitions dev_state_transitions[] = {
},
{
MHI_PM_SYS_ERR_PROCESS,
- MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_POR | MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_SYS_ERR_FAIL,
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
MHI_PM_LD_ERR_FATAL_DETECT
},
/* L2 States */
@@ -123,6 +132,7 @@ enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cn
if (unlikely(!(dev_state_transitions[index].to_states & state)))
return cur_state;
+ trace_mhi_tryset_pm_state(mhi_cntrl, state);
mhi_cntrl->pm_state = state;
return mhi_cntrl->pm_state;
}
@@ -458,7 +468,8 @@ error_mission_mode:
}
/* Handle shutdown transitions */
-static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
+static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
+ bool destroy_device)
{
enum mhi_pm_state cur_state;
struct mhi_event *mhi_event;
@@ -520,8 +531,16 @@ skip_mhi_reset:
dev_dbg(dev, "Waiting for all pending threads to complete\n");
wake_up_all(&mhi_cntrl->state_event);
- dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
- device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);
+ /*
+ * Only destroy the 'struct device' for channels if indicated by the
+	 * 'destroy_device' flag. During system suspend or hibernation there is
+	 * no need to destroy the 'struct device', since the endpoint device is
+	 * still physically attached to the machine.
+ */
+ if (destroy_device) {
+ dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
+ device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);
+ }
mutex_lock(&mhi_cntrl->pm_mutex);
@@ -583,6 +602,7 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
struct mhi_cmd *mhi_cmd;
struct mhi_event_ctxt *er_ctxt;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ bool reset_device = false;
int ret, i;
dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
@@ -611,8 +631,23 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
/* Wake up threads waiting for state transition */
wake_up_all(&mhi_cntrl->state_event);
- /* Trigger MHI RESET so that the device will not access host memory */
if (MHI_REG_ACCESS_VALID(prev_state)) {
+ /*
+ * If the device is in PBL or SBL, it will only respond to
+ * RESET if the device is in SYSERR state. SYSERR might
+ * already be cleared at this point.
+ */
+ enum mhi_state cur_state = mhi_get_mhi_state(mhi_cntrl);
+ enum mhi_ee_type cur_ee = mhi_get_exec_env(mhi_cntrl);
+
+ if (cur_state == MHI_STATE_SYS_ERR)
+ reset_device = true;
+ else if (cur_ee != MHI_EE_PBL && cur_ee != MHI_EE_SBL)
+ reset_device = true;
+ }
+
+ /* Trigger MHI RESET so that the device will not access host memory */
+ if (reset_device) {
u32 in_reset = -1;
unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);
@@ -629,7 +664,13 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
!in_reset, timeout);
if (!ret || in_reset) {
dev_err(dev, "Device failed to exit MHI Reset state\n");
- goto exit_sys_error_transition;
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl,
+ MHI_PM_SYS_ERR_FAIL);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ /* Shutdown may have occurred, otherwise cleanup now */
+ if (cur_state != MHI_PM_SYS_ERR_FAIL)
+ goto exit_sys_error_transition;
}
/*
@@ -758,7 +799,6 @@ void mhi_pm_st_worker(struct work_struct *work)
struct mhi_controller *mhi_cntrl = container_of(work,
struct mhi_controller,
st_worker);
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
spin_lock_irq(&mhi_cntrl->transition_lock);
list_splice_tail_init(&mhi_cntrl->transition_list, &head);
@@ -766,8 +806,8 @@ void mhi_pm_st_worker(struct work_struct *work)
list_for_each_entry_safe(itr, tmp, &head, node) {
list_del(&itr->node);
- dev_dbg(dev, "Handling state transition: %s\n",
- TO_DEV_STATE_TRANS_STR(itr->state));
+
+ trace_mhi_pm_st_transition(mhi_cntrl, itr->state);
switch (itr->state) {
case DEV_ST_TRANSITION_PBL:
@@ -806,7 +846,10 @@ void mhi_pm_st_worker(struct work_struct *work)
mhi_pm_sys_error_transition(mhi_cntrl);
break;
case DEV_ST_TRANSITION_DISABLE:
- mhi_pm_disable_transition(mhi_cntrl);
+ mhi_pm_disable_transition(mhi_cntrl, false);
+ break;
+ case DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE:
+ mhi_pm_disable_transition(mhi_cntrl, true);
break;
default:
break;
@@ -1160,7 +1203,8 @@ error_exit:
}
EXPORT_SYMBOL_GPL(mhi_async_power_up);
-void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
+static void __mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful,
+ bool destroy_device)
{
enum mhi_pm_state cur_state, transition_state;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
@@ -1196,15 +1240,32 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
write_unlock_irq(&mhi_cntrl->pm_lock);
mutex_unlock(&mhi_cntrl->pm_mutex);
- mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);
+ if (destroy_device)
+ mhi_queue_state_transition(mhi_cntrl,
+ DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE);
+ else
+ mhi_queue_state_transition(mhi_cntrl,
+ DEV_ST_TRANSITION_DISABLE);
/* Wait for shutdown to complete */
flush_work(&mhi_cntrl->st_worker);
disable_irq(mhi_cntrl->irq[0]);
}
+
+void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
+{
+ __mhi_power_down(mhi_cntrl, graceful, true);
+}
EXPORT_SYMBOL_GPL(mhi_power_down);
+void mhi_power_down_keep_dev(struct mhi_controller *mhi_cntrl,
+ bool graceful)
+{
+ __mhi_power_down(mhi_cntrl, graceful, false);
+}
+EXPORT_SYMBOL_GPL(mhi_power_down_keep_dev);
+
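mhi_power_down_keep_dev() pairs with the destroy_device flag added above: the stack is torn down but the child MHI devices stay registered, which suits system suspend and hibernation. A hedged sketch of a controller driver using the pair (function names assumed, not taken from this patch):

static int foo_mhi_suspend(struct mhi_controller *mhi_cntrl)
{
	/* Power down but keep the child 'struct device' instances around */
	mhi_power_down_keep_dev(mhi_cntrl, true);
	mhi_unprepare_after_power_down(mhi_cntrl);

	return 0;
}

static int foo_mhi_resume(struct mhi_controller *mhi_cntrl)
{
	int ret;

	ret = mhi_prepare_for_power_up(mhi_cntrl);
	if (ret)
		return ret;

	/* Channel devices were preserved, so clients resume once the device is up */
	return mhi_sync_power_up(mhi_cntrl);
}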
int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
{
int ret = mhi_async_power_up(mhi_cntrl);
@@ -1251,20 +1312,6 @@ int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
}
EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);
-void mhi_device_get(struct mhi_device *mhi_dev)
-{
- struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
-
- mhi_dev->dev_wake++;
- read_lock_bh(&mhi_cntrl->pm_lock);
- if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
- mhi_trigger_resume(mhi_cntrl);
-
- mhi_cntrl->wake_get(mhi_cntrl, true);
- read_unlock_bh(&mhi_cntrl->pm_lock);
-}
-EXPORT_SYMBOL_GPL(mhi_device_get);
-
int mhi_device_get_sync(struct mhi_device *mhi_dev)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
diff --git a/drivers/bus/mhi/host/trace.h b/drivers/bus/mhi/host/trace.h
new file mode 100644
index 000000000000..3e0c41777429
--- /dev/null
+++ b/drivers/bus/mhi/host/trace.h
@@ -0,0 +1,283 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mhi_host
+
+#if !defined(_TRACE_EVENT_MHI_HOST_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EVENT_MHI_HOST_H
+
+#include <linux/byteorder/generic.h>
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+#include "../common.h"
+#include "internal.h"
+
+#undef mhi_state
+#undef mhi_state_end
+
+#define mhi_state(a, b) TRACE_DEFINE_ENUM(MHI_STATE_##a);
+#define mhi_state_end(a, b) TRACE_DEFINE_ENUM(MHI_STATE_##a);
+
+MHI_STATE_LIST
+
+#undef mhi_state
+#undef mhi_state_end
+
+#define mhi_state(a, b) { MHI_STATE_##a, b },
+#define mhi_state_end(a, b) { MHI_STATE_##a, b }
+
+#undef mhi_pm_state
+#undef mhi_pm_state_end
+
+#define mhi_pm_state(a, b) TRACE_DEFINE_ENUM(MHI_PM_STATE_##a);
+#define mhi_pm_state_end(a, b) TRACE_DEFINE_ENUM(MHI_PM_STATE_##a);
+
+MHI_PM_STATE_LIST
+
+#undef mhi_pm_state
+#undef mhi_pm_state_end
+
+#define mhi_pm_state(a, b) { MHI_PM_STATE_##a, b },
+#define mhi_pm_state_end(a, b) { MHI_PM_STATE_##a, b }
+
+#undef mhi_ee
+#undef mhi_ee_end
+
+#define mhi_ee(a, b) TRACE_DEFINE_ENUM(MHI_EE_##a);
+#define mhi_ee_end(a, b) TRACE_DEFINE_ENUM(MHI_EE_##a);
+
+MHI_EE_LIST
+
+#undef mhi_ee
+#undef mhi_ee_end
+
+#define mhi_ee(a, b) { MHI_EE_##a, b },
+#define mhi_ee_end(a, b) { MHI_EE_##a, b }
+
+#undef ch_state_type
+#undef ch_state_type_end
+
+#define ch_state_type(a, b) TRACE_DEFINE_ENUM(MHI_CH_STATE_TYPE_##a);
+#define ch_state_type_end(a, b) TRACE_DEFINE_ENUM(MHI_CH_STATE_TYPE_##a);
+
+MHI_CH_STATE_TYPE_LIST
+
+#undef ch_state_type
+#undef ch_state_type_end
+
+#define ch_state_type(a, b) { MHI_CH_STATE_TYPE_##a, b },
+#define ch_state_type_end(a, b) { MHI_CH_STATE_TYPE_##a, b }
+
+#undef dev_st_trans
+#undef dev_st_trans_end
+
+#define dev_st_trans(a, b) TRACE_DEFINE_ENUM(DEV_ST_TRANSITION_##a);
+#define dev_st_trans_end(a, b) TRACE_DEFINE_ENUM(DEV_ST_TRANSITION_##a);
+
+DEV_ST_TRANSITION_LIST
+
+#undef dev_st_trans
+#undef dev_st_trans_end
+
+#define dev_st_trans(a, b) { DEV_ST_TRANSITION_##a, b },
+#define dev_st_trans_end(a, b) { DEV_ST_TRANSITION_##a, b }
+
+#define TPS(x) tracepoint_string(x)
+
+TRACE_EVENT(mhi_gen_tre,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ struct mhi_ring_element *mhi_tre),
+
+ TP_ARGS(mhi_cntrl, mhi_chan, mhi_tre),
+
+ TP_STRUCT__entry(
+ __string(name, mhi_cntrl->mhi_dev->name)
+ __field(int, ch_num)
+ __field(void *, wp)
+ __field(uint64_t, tre_ptr)
+ __field(uint32_t, dword0)
+ __field(uint32_t, dword1)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ __entry->ch_num = mhi_chan->chan;
+ __entry->wp = mhi_tre;
+ __entry->tre_ptr = le64_to_cpu(mhi_tre->ptr);
+ __entry->dword0 = le32_to_cpu(mhi_tre->dword[0]);
+ __entry->dword1 = le32_to_cpu(mhi_tre->dword[1]);
+ ),
+
+ TP_printk("%s: Chan: %d TRE: 0x%p TRE buf: 0x%llx DWORD0: 0x%08x DWORD1: 0x%08x\n",
+ __get_str(name), __entry->ch_num, __entry->wp, __entry->tre_ptr,
+ __entry->dword0, __entry->dword1)
+);
+
+TRACE_EVENT(mhi_intvec_states,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, int dev_ee, int dev_state),
+
+ TP_ARGS(mhi_cntrl, dev_ee, dev_state),
+
+ TP_STRUCT__entry(
+ __string(name, mhi_cntrl->mhi_dev->name)
+ __field(int, local_ee)
+ __field(int, state)
+ __field(int, dev_ee)
+ __field(int, dev_state)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ __entry->local_ee = mhi_cntrl->ee;
+ __entry->state = mhi_cntrl->dev_state;
+ __entry->dev_ee = dev_ee;
+ __entry->dev_state = dev_state;
+ ),
+
+ TP_printk("%s: Local EE: %s State: %s Device EE: %s Dev State: %s\n",
+ __get_str(name),
+ __print_symbolic(__entry->local_ee, MHI_EE_LIST),
+ __print_symbolic(__entry->state, MHI_STATE_LIST),
+ __print_symbolic(__entry->dev_ee, MHI_EE_LIST),
+ __print_symbolic(__entry->dev_state, MHI_STATE_LIST))
+);
+
+TRACE_EVENT(mhi_tryset_pm_state,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, int pm_state),
+
+ TP_ARGS(mhi_cntrl, pm_state),
+
+ TP_STRUCT__entry(
+ __string(name, mhi_cntrl->mhi_dev->name)
+ __field(int, pm_state)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ if (pm_state)
+ pm_state = __fls(pm_state);
+ __entry->pm_state = pm_state;
+ ),
+
+ TP_printk("%s: PM state: %s\n", __get_str(name),
+ __print_symbolic(__entry->pm_state, MHI_PM_STATE_LIST))
+);
+
+DECLARE_EVENT_CLASS(mhi_process_event_ring,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_ring_element *rp),
+
+ TP_ARGS(mhi_cntrl, rp),
+
+ TP_STRUCT__entry(
+ __string(name, mhi_cntrl->mhi_dev->name)
+ __field(uint32_t, dword0)
+ __field(uint32_t, dword1)
+ __field(int, state)
+ __field(uint64_t, ptr)
+ __field(void *, rp)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ __entry->rp = rp;
+ __entry->ptr = le64_to_cpu(rp->ptr);
+ __entry->dword0 = le32_to_cpu(rp->dword[0]);
+ __entry->dword1 = le32_to_cpu(rp->dword[1]);
+ __entry->state = MHI_TRE_GET_EV_STATE(rp);
+ ),
+
+ TP_printk("%s: TRE: 0x%p TRE buf: 0x%llx DWORD0: 0x%08x DWORD1: 0x%08x State: %s\n",
+ __get_str(name), __entry->rp, __entry->ptr, __entry->dword0,
+ __entry->dword1, __print_symbolic(__entry->state, MHI_STATE_LIST))
+);
+
+DEFINE_EVENT(mhi_process_event_ring, mhi_data_event,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_ring_element *rp),
+
+ TP_ARGS(mhi_cntrl, rp)
+);
+
+DEFINE_EVENT(mhi_process_event_ring, mhi_ctrl_event,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_ring_element *rp),
+
+ TP_ARGS(mhi_cntrl, rp)
+);
+
+DECLARE_EVENT_CLASS(mhi_update_channel_state,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, int state,
+ const char *reason),
+
+ TP_ARGS(mhi_cntrl, mhi_chan, state, reason),
+
+ TP_STRUCT__entry(
+ __string(name, mhi_cntrl->mhi_dev->name)
+ __field(int, ch_num)
+ __field(int, state)
+ __field(const char *, reason)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ __entry->ch_num = mhi_chan->chan;
+ __entry->state = state;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("%s: chan%d: %s state to: %s\n",
+ __get_str(name), __entry->ch_num, __entry->reason,
+ __print_symbolic(__entry->state, MHI_CH_STATE_TYPE_LIST))
+);
+
+DEFINE_EVENT(mhi_update_channel_state, mhi_channel_command_start,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, int state,
+ const char *reason),
+
+ TP_ARGS(mhi_cntrl, mhi_chan, state, reason)
+);
+
+DEFINE_EVENT(mhi_update_channel_state, mhi_channel_command_end,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, int state,
+ const char *reason),
+
+ TP_ARGS(mhi_cntrl, mhi_chan, state, reason)
+);
+
+TRACE_EVENT(mhi_pm_st_transition,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, int state),
+
+ TP_ARGS(mhi_cntrl, state),
+
+ TP_STRUCT__entry(
+ __string(name, mhi_cntrl->mhi_dev->name)
+ __field(int, state)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ __entry->state = state;
+ ),
+
+ TP_printk("%s: Handling state transition: %s\n", __get_str(name),
+ __print_symbolic(__entry->state, DEV_ST_TRANSITION_LIST))
+);
+
+#endif
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/bus/mhi/host
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
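As with any TRACE_EVENT() header, exactly one translation unit must instantiate the tracepoints by defining CREATE_TRACE_POINTS before including the header; every other host file includes it plainly. A sketch of that instantiation (file placement assumed, not visible in this hunk):

/* In a single source file of the host stack, e.g. init.c: */
#define CREATE_TRACE_POINTS
#include "trace.h"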
diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
index 554e1992edd4..12dd32fd0b62 100644
--- a/drivers/bus/mips_cdmm.c
+++ b/drivers/bus/mips_cdmm.c
@@ -37,7 +37,7 @@
/* Each block of device registers is 64 bytes */
#define CDMM_DRB_SIZE 64
-#define to_mips_cdmm_driver(d) container_of(d, struct mips_cdmm_driver, drv)
+#define to_mips_cdmm_driver(d) container_of_const(d, struct mips_cdmm_driver, drv)
/* Default physical base address */
static phys_addr_t mips_cdmm_default_base;
@@ -59,10 +59,10 @@ mips_cdmm_lookup(const struct mips_cdmm_device_id *table,
return ret ? table : NULL;
}
-static int mips_cdmm_match(struct device *dev, struct device_driver *drv)
+static int mips_cdmm_match(struct device *dev, const struct device_driver *drv)
{
struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
- struct mips_cdmm_driver *cdrv = to_mips_cdmm_driver(drv);
+ const struct mips_cdmm_driver *cdrv = to_mips_cdmm_driver(drv);
return mips_cdmm_lookup(cdrv->id_table, cdev) != NULL;
}
@@ -118,7 +118,7 @@ static struct attribute *mips_cdmm_dev_attrs[] = {
};
ATTRIBUTE_GROUPS(mips_cdmm_dev);
-struct bus_type mips_cdmm_bustype = {
+const struct bus_type mips_cdmm_bustype = {
.name = "cdmm",
.dev_groups = mips_cdmm_dev_groups,
.match = mips_cdmm_match,
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
index 641c1a6adc8a..6c3e5c5dae10 100644
--- a/drivers/bus/moxtet.c
+++ b/drivers/bus/moxtet.c
@@ -83,10 +83,10 @@ static const struct attribute_group *moxtet_dev_groups[] = {
NULL,
};
-static int moxtet_match(struct device *dev, struct device_driver *drv)
+static int moxtet_match(struct device *dev, const struct device_driver *drv)
{
struct moxtet_device *mdev = to_moxtet_device(dev);
- struct moxtet_driver *tdrv = to_moxtet_driver(drv);
+ const struct moxtet_driver *tdrv = to_moxtet_driver(drv);
const enum turris_mox_module_id *t;
if (of_driver_match_device(dev, drv))
@@ -484,7 +484,6 @@ static const struct file_operations input_fops = {
.owner = THIS_MODULE,
.open = moxtet_debug_open,
.read = input_read,
- .llseek = no_llseek,
};
static ssize_t output_read(struct file *file, char __user *buf, size_t len,
@@ -549,7 +548,6 @@ static const struct file_operations output_fops = {
.open = moxtet_debug_open,
.read = output_read,
.write = output_write,
- .llseek = no_llseek,
};
static int moxtet_register_debugfs(struct moxtet *moxtet)
@@ -659,7 +657,7 @@ static void moxtet_irq_print_chip(struct irq_data *d, struct seq_file *p)
id = moxtet->modules[pos->idx];
- seq_printf(p, " moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
+ seq_printf(p, "moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
pos->bit);
}
@@ -739,9 +737,9 @@ static int moxtet_irq_setup(struct moxtet *moxtet)
{
int i, ret;
- moxtet->irq.domain = irq_domain_add_simple(moxtet->dev->of_node,
- MOXTET_NIRQS, 0,
- &moxtet_irq_domain, moxtet);
+ moxtet->irq.domain = irq_domain_create_simple(of_fwnode_handle(moxtet->dev->of_node),
+ MOXTET_NIRQS, 0,
+ &moxtet_irq_domain, moxtet);
if (moxtet->irq.domain == NULL) {
dev_err(moxtet->dev, "Could not add IRQ domain\n");
return -ENOMEM;
diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c
index 7d7479ba0a75..e4dfda7b3b10 100644
--- a/drivers/bus/omap-ocp2scp.c
+++ b/drivers/bus/omap-ocp2scp.c
@@ -101,7 +101,7 @@ MODULE_DEVICE_TABLE(of, omap_ocp2scp_id_table);
static struct platform_driver omap_ocp2scp_driver = {
.probe = omap_ocp2scp_probe,
- .remove_new = omap_ocp2scp_remove,
+ .remove = omap_ocp2scp_remove,
.driver = {
.name = "omap-ocp2scp",
.of_match_table = of_match_ptr(omap_ocp2scp_id_table),
diff --git a/drivers/bus/omap_l3_smx.c b/drivers/bus/omap_l3_smx.c
index ee6d29925e4d..7f0a8f8b3f4c 100644
--- a/drivers/bus/omap_l3_smx.c
+++ b/drivers/bus/omap_l3_smx.c
@@ -273,7 +273,7 @@ static void omap3_l3_remove(struct platform_device *pdev)
static struct platform_driver omap3_l3_driver = {
.probe = omap3_l3_probe,
- .remove_new = omap3_l3_remove,
+ .remove = omap3_l3_remove,
.driver = {
.name = "omap_l3_smx",
.of_match_table = of_match_ptr(omap3_l3_match),
diff --git a/drivers/bus/qcom-ssc-block-bus.c b/drivers/bus/qcom-ssc-block-bus.c
index 5931974a21fa..7f5fd4e0940d 100644
--- a/drivers/bus/qcom-ssc-block-bus.c
+++ b/drivers/bus/qcom-ssc-block-bus.c
@@ -264,18 +264,6 @@ static int qcom_ssc_block_bus_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
- data->pd_names = qcom_ssc_block_pd_names;
- data->num_pds = ARRAY_SIZE(qcom_ssc_block_pd_names);
-
- /* power domains */
- ret = qcom_ssc_block_bus_pds_attach(&pdev->dev, data->pds, data->pd_names, data->num_pds);
- if (ret < 0)
- return dev_err_probe(&pdev->dev, ret, "error when attaching power domains\n");
-
- ret = qcom_ssc_block_bus_pds_enable(data->pds, data->num_pds);
- if (ret < 0)
- return dev_err_probe(&pdev->dev, ret, "error when enabling power domains\n");
-
/* low level overrides for when the HW logic doesn't "just work" */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpm_sscaon_config0");
data->reg_mpm_sscaon_config0 = devm_ioremap_resource(&pdev->dev, res);
@@ -343,11 +331,30 @@ static int qcom_ssc_block_bus_probe(struct platform_device *pdev)
data->ssc_axi_halt = halt_args.args[0];
+ /* power domains */
+ data->pd_names = qcom_ssc_block_pd_names;
+ data->num_pds = ARRAY_SIZE(qcom_ssc_block_pd_names);
+
+ ret = qcom_ssc_block_bus_pds_attach(&pdev->dev, data->pds, data->pd_names, data->num_pds);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "error when attaching power domains\n");
+
+ ret = qcom_ssc_block_bus_pds_enable(data->pds, data->num_pds);
+ if (ret < 0) {
+ dev_err_probe(&pdev->dev, ret, "error when enabling power domains\n");
+ goto err_detach_pds_bus;
+ }
+
qcom_ssc_block_bus_init(&pdev->dev);
of_platform_populate(np, NULL, NULL, &pdev->dev);
return 0;
+
+err_detach_pds_bus:
+ qcom_ssc_block_bus_pds_detach(&pdev->dev, data->pds, data->num_pds);
+
+ return ret;
}
static void qcom_ssc_block_bus_remove(struct platform_device *pdev)
@@ -356,9 +363,6 @@ static void qcom_ssc_block_bus_remove(struct platform_device *pdev)
qcom_ssc_block_bus_deinit(&pdev->dev);
- iounmap(data->reg_mpm_sscaon_config0);
- iounmap(data->reg_mpm_sscaon_config1);
-
qcom_ssc_block_bus_pds_disable(data->pds, data->num_pds);
qcom_ssc_block_bus_pds_detach(&pdev->dev, data->pds, data->num_pds);
pm_runtime_disable(&pdev->dev);
@@ -373,7 +377,7 @@ MODULE_DEVICE_TABLE(of, qcom_ssc_block_bus_of_match);
static struct platform_driver qcom_ssc_block_bus_driver = {
.probe = qcom_ssc_block_bus_probe,
- .remove_new = qcom_ssc_block_bus_remove,
+ .remove = qcom_ssc_block_bus_remove,
.driver = {
.name = "qcom-ssc-block-bus",
.of_match_table = qcom_ssc_block_bus_of_match,
diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c
index 50870c827889..d8e029e7e53f 100644
--- a/drivers/bus/simple-pm-bus.c
+++ b/drivers/bus/simple-pm-bus.c
@@ -109,9 +109,29 @@ static int simple_pm_bus_runtime_resume(struct device *dev)
return 0;
}
+static int simple_pm_bus_suspend(struct device *dev)
+{
+ struct simple_pm_bus *bus = dev_get_drvdata(dev);
+
+ if (!bus)
+ return 0;
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static int simple_pm_bus_resume(struct device *dev)
+{
+ struct simple_pm_bus *bus = dev_get_drvdata(dev);
+
+ if (!bus)
+ return 0;
+
+ return pm_runtime_force_resume(dev);
+}
+
static const struct dev_pm_ops simple_pm_bus_pm_ops = {
RUNTIME_PM_OPS(simple_pm_bus_runtime_suspend, simple_pm_bus_runtime_resume, NULL)
- NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(simple_pm_bus_suspend, simple_pm_bus_resume)
};
#define ONLY_BUS ((void *) 1) /* Match if the device is only a bus. */
@@ -128,7 +148,7 @@ MODULE_DEVICE_TABLE(of, simple_pm_bus_of_match);
static struct platform_driver simple_pm_bus_driver = {
.probe = simple_pm_bus_probe,
- .remove_new = simple_pm_bus_remove,
+ .remove = simple_pm_bus_remove,
.driver = {
.name = "simple-pm-bus",
.of_match_table = simple_pm_bus_of_match,
diff --git a/drivers/bus/stm32_etzpc.c b/drivers/bus/stm32_etzpc.c
new file mode 100644
index 000000000000..7fc0f16960be
--- /dev/null
+++ b/drivers/bus/stm32_etzpc.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023, STMicroelectronics - All Rights Reserved
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include "stm32_firewall.h"
+
+/*
+ * ETZPC registers
+ */
+#define ETZPC_DECPROT 0x10
+#define ETZPC_HWCFGR 0x3F0
+
+/*
+ * HWCFGR register
+ */
+#define ETZPC_HWCFGR_NUM_TZMA GENMASK(7, 0)
+#define ETZPC_HWCFGR_NUM_PER_SEC GENMASK(15, 8)
+#define ETZPC_HWCFGR_NUM_AHB_SEC GENMASK(23, 16)
+#define ETZPC_HWCFGR_CHUNKS1N4 GENMASK(31, 24)
+
+/*
+ * ETZPC miscellaneous
+ */
+#define ETZPC_PROT_MASK GENMASK(1, 0)
+#define ETZPC_PROT_A7NS 0x3
+#define ETZPC_DECPROT_SHIFT 1
+
+#define IDS_PER_DECPROT_REGS 16
+
+static int stm32_etzpc_grant_access(struct stm32_firewall_controller *ctrl, u32 firewall_id)
+{
+ u32 offset, reg_offset, sec_val;
+
+ if (firewall_id >= ctrl->max_entries) {
+ dev_err(ctrl->dev, "Invalid sys bus ID %u", firewall_id);
+ return -EINVAL;
+ }
+
+ /* Check access configuration, 16 peripherals per register */
+ reg_offset = ETZPC_DECPROT + 0x4 * (firewall_id / IDS_PER_DECPROT_REGS);
+ offset = (firewall_id % IDS_PER_DECPROT_REGS) << ETZPC_DECPROT_SHIFT;
+
+	/* Verify the peripheral is non-secure and attributed to the Cortex-A7 */
+ sec_val = (readl(ctrl->mmio + reg_offset) >> offset) & ETZPC_PROT_MASK;
+ if (sec_val != ETZPC_PROT_A7NS) {
+ dev_dbg(ctrl->dev, "Invalid bus configuration: reg_offset %#x, value %d\n",
+ reg_offset, sec_val);
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+static void stm32_etzpc_release_access(struct stm32_firewall_controller *ctrl __maybe_unused,
+ u32 firewall_id __maybe_unused)
+{
+}
+
+static int stm32_etzpc_probe(struct platform_device *pdev)
+{
+ struct stm32_firewall_controller *etzpc_controller;
+ struct device_node *np = pdev->dev.of_node;
+ u32 nb_per, nb_master;
+ struct resource *res;
+ void __iomem *mmio;
+ int rc;
+
+ etzpc_controller = devm_kzalloc(&pdev->dev, sizeof(*etzpc_controller), GFP_KERNEL);
+ if (!etzpc_controller)
+ return -ENOMEM;
+
+ mmio = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(mmio))
+ return PTR_ERR(mmio);
+
+ etzpc_controller->dev = &pdev->dev;
+ etzpc_controller->mmio = mmio;
+ etzpc_controller->name = dev_driver_string(etzpc_controller->dev);
+ etzpc_controller->type = STM32_PERIPHERAL_FIREWALL | STM32_MEMORY_FIREWALL;
+ etzpc_controller->grant_access = stm32_etzpc_grant_access;
+ etzpc_controller->release_access = stm32_etzpc_release_access;
+
+	/* Get number of ETZPC entries */
+ nb_per = FIELD_GET(ETZPC_HWCFGR_NUM_PER_SEC,
+ readl(etzpc_controller->mmio + ETZPC_HWCFGR));
+ nb_master = FIELD_GET(ETZPC_HWCFGR_NUM_AHB_SEC,
+ readl(etzpc_controller->mmio + ETZPC_HWCFGR));
+ etzpc_controller->max_entries = nb_per + nb_master;
+
+ platform_set_drvdata(pdev, etzpc_controller);
+
+ rc = stm32_firewall_controller_register(etzpc_controller);
+ if (rc) {
+ dev_err(etzpc_controller->dev, "Couldn't register as a firewall controller: %d",
+ rc);
+ return rc;
+ }
+
+ rc = stm32_firewall_populate_bus(etzpc_controller);
+ if (rc) {
+ dev_err(etzpc_controller->dev, "Couldn't populate ETZPC bus: %d",
+ rc);
+ return rc;
+ }
+
+ /* Populate all allowed nodes */
+ return of_platform_populate(np, NULL, NULL, &pdev->dev);
+}
+
+static const struct of_device_id stm32_etzpc_of_match[] = {
+ { .compatible = "st,stm32-etzpc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, stm32_etzpc_of_match);
+
+static struct platform_driver stm32_etzpc_driver = {
+ .probe = stm32_etzpc_probe,
+ .driver = {
+ .name = "stm32-etzpc",
+ .of_match_table = stm32_etzpc_of_match,
+ },
+};
+module_platform_driver(stm32_etzpc_driver);
+
+MODULE_AUTHOR("Gatien Chevallier <gatien.chevallier@foss.st.com>");
+MODULE_DESCRIPTION("STMicroelectronics ETZPC driver");
+MODULE_LICENSE("GPL");
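Each DECPROT register packs 16 peripheral IDs at two bits apiece, which is what the reg_offset/offset computation in stm32_etzpc_grant_access() encodes. A worked example with an illustrative ID:

/*
 * firewall_id = 17:
 *   reg_offset = ETZPC_DECPROT + 0x4 * (17 / 16)  = 0x10 + 0x4 = 0x14
 *   offset     = (17 % 16) << ETZPC_DECPROT_SHIFT = 1 << 1     = 2
 * Access is granted only if ((readl(mmio + 0x14) >> 2) & ETZPC_PROT_MASK)
 * equals ETZPC_PROT_A7NS, i.e. the peripheral is non-secure and assigned
 * to the Cortex-A7.
 */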
diff --git a/drivers/bus/stm32_firewall.c b/drivers/bus/stm32_firewall.c
new file mode 100644
index 000000000000..2fc9761dadec
--- /dev/null
+++ b/drivers/bus/stm32_firewall.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023, STMicroelectronics - All Rights Reserved
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/bus/stm32_firewall_device.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+
+#include "stm32_firewall.h"
+
+/* Corresponds to STM32_FIREWALL_MAX_EXTRA_ARGS + firewall ID */
+#define STM32_FIREWALL_MAX_ARGS (STM32_FIREWALL_MAX_EXTRA_ARGS + 1)
+
+static LIST_HEAD(firewall_controller_list);
+static DEFINE_MUTEX(firewall_controller_list_lock);
+
+/* Firewall device API */
+
+int stm32_firewall_get_firewall(struct device_node *np, struct stm32_firewall *firewall,
+ unsigned int nb_firewall)
+{
+ struct stm32_firewall_controller *ctrl;
+ struct of_phandle_iterator it;
+ unsigned int i, j = 0;
+ int err;
+
+ if (!firewall || !nb_firewall)
+ return -EINVAL;
+
+ /* Parse property with phandle parsed out */
+ of_for_each_phandle(&it, err, np, "access-controllers", "#access-controller-cells", 0) {
+ struct of_phandle_args provider_args;
+ struct device_node *provider = it.node;
+ const char *fw_entry;
+ bool match = false;
+
+ if (err) {
+			pr_err("Unable to get access-controllers property for node %s, err: %d\n",
+ np->full_name, err);
+ of_node_put(provider);
+ return err;
+ }
+
+ if (j >= nb_firewall) {
+ pr_err("Too many firewall controllers");
+ of_node_put(provider);
+ return -EINVAL;
+ }
+
+ provider_args.args_count = of_phandle_iterator_args(&it, provider_args.args,
+ STM32_FIREWALL_MAX_ARGS);
+
+ /* Check if the parsed phandle corresponds to a registered firewall controller */
+ mutex_lock(&firewall_controller_list_lock);
+ list_for_each_entry(ctrl, &firewall_controller_list, entry) {
+ if (ctrl->dev->of_node->phandle == it.phandle) {
+ match = true;
+ firewall[j].firewall_ctrl = ctrl;
+ break;
+ }
+ }
+ mutex_unlock(&firewall_controller_list_lock);
+
+ if (!match) {
+ firewall[j].firewall_ctrl = NULL;
+ pr_err("No firewall controller registered for %s\n", np->full_name);
+ of_node_put(provider);
+ return -ENODEV;
+ }
+
+ err = of_property_read_string_index(np, "access-controller-names", j, &fw_entry);
+ if (err == 0)
+ firewall[j].entry = fw_entry;
+
+ /* Handle the case when there are no arguments given along with the phandle */
+ if (provider_args.args_count < 0 ||
+ provider_args.args_count > STM32_FIREWALL_MAX_ARGS) {
+ of_node_put(provider);
+ return -EINVAL;
+ } else if (provider_args.args_count == 0) {
+ firewall[j].extra_args_size = 0;
+ firewall[j].firewall_id = U32_MAX;
+ j++;
+ continue;
+ }
+
+ /* The firewall ID is always the first argument */
+ firewall[j].firewall_id = provider_args.args[0];
+
+ /* Extra args start at the second argument */
+ for (i = 0; i < provider_args.args_count - 1; i++)
+ firewall[j].extra_args[i] = provider_args.args[i + 1];
+
+ /* Remove the firewall ID arg that is not an extra argument */
+ firewall[j].extra_args_size = provider_args.args_count - 1;
+
+ j++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_firewall_get_firewall);
+
+int stm32_firewall_grant_access(struct stm32_firewall *firewall)
+{
+ struct stm32_firewall_controller *firewall_controller;
+
+ if (!firewall || firewall->firewall_id == U32_MAX)
+ return -EINVAL;
+
+ firewall_controller = firewall->firewall_ctrl;
+
+ if (!firewall_controller)
+ return -ENODEV;
+
+ return firewall_controller->grant_access(firewall_controller, firewall->firewall_id);
+}
+EXPORT_SYMBOL_GPL(stm32_firewall_grant_access);
+
+int stm32_firewall_grant_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id)
+{
+ struct stm32_firewall_controller *firewall_controller;
+
+ if (!firewall || subsystem_id == U32_MAX || firewall->firewall_id == U32_MAX)
+ return -EINVAL;
+
+ firewall_controller = firewall->firewall_ctrl;
+
+ if (!firewall_controller)
+ return -ENODEV;
+
+ return firewall_controller->grant_access(firewall_controller, subsystem_id);
+}
+EXPORT_SYMBOL_GPL(stm32_firewall_grant_access_by_id);
+
+void stm32_firewall_release_access(struct stm32_firewall *firewall)
+{
+ struct stm32_firewall_controller *firewall_controller;
+
+ if (!firewall || firewall->firewall_id == U32_MAX) {
+ pr_debug("Incorrect arguments when releasing a firewall access\n");
+ return;
+ }
+
+ firewall_controller = firewall->firewall_ctrl;
+
+ if (!firewall_controller) {
+ pr_debug("No firewall controller to release\n");
+ return;
+ }
+
+ firewall_controller->release_access(firewall_controller, firewall->firewall_id);
+}
+EXPORT_SYMBOL_GPL(stm32_firewall_release_access);
+
+void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id)
+{
+ struct stm32_firewall_controller *firewall_controller;
+
+ if (!firewall || subsystem_id == U32_MAX || firewall->firewall_id == U32_MAX) {
+ pr_debug("Incorrect arguments when releasing a firewall access");
+ return;
+ }
+
+ firewall_controller = firewall->firewall_ctrl;
+
+ if (!firewall_controller) {
+ pr_debug("No firewall controller to release");
+ return;
+ }
+
+ firewall_controller->release_access(firewall_controller, subsystem_id);
+}
+EXPORT_SYMBOL_GPL(stm32_firewall_release_access_by_id);
+
+/* Firewall controller API */
+
+int stm32_firewall_controller_register(struct stm32_firewall_controller *firewall_controller)
+{
+ struct stm32_firewall_controller *ctrl;
+
+ if (!firewall_controller)
+ return -ENODEV;
+
+ pr_info("Registering %s firewall controller\n", firewall_controller->name);
+
+ mutex_lock(&firewall_controller_list_lock);
+ list_for_each_entry(ctrl, &firewall_controller_list, entry) {
+ if (ctrl == firewall_controller) {
+ pr_debug("%s firewall controller already registered\n",
+ firewall_controller->name);
+ mutex_unlock(&firewall_controller_list_lock);
+ return 0;
+ }
+ }
+ list_add_tail(&firewall_controller->entry, &firewall_controller_list);
+ mutex_unlock(&firewall_controller_list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_firewall_controller_register);
+
+void stm32_firewall_controller_unregister(struct stm32_firewall_controller *firewall_controller)
+{
+ struct stm32_firewall_controller *ctrl;
+ bool controller_removed = false;
+
+ if (!firewall_controller) {
+ pr_debug("Null reference while unregistering firewall controller\n");
+ return;
+ }
+
+ mutex_lock(&firewall_controller_list_lock);
+ list_for_each_entry(ctrl, &firewall_controller_list, entry) {
+ if (ctrl == firewall_controller) {
+ controller_removed = true;
+ list_del_init(&ctrl->entry);
+ break;
+ }
+ }
+ mutex_unlock(&firewall_controller_list_lock);
+
+ if (!controller_removed)
+ pr_debug("There was no firewall controller named %s to unregister\n",
+ firewall_controller->name);
+}
+EXPORT_SYMBOL_GPL(stm32_firewall_controller_unregister);
+
+int stm32_firewall_populate_bus(struct stm32_firewall_controller *firewall_controller)
+{
+ struct stm32_firewall *firewalls;
+ struct device_node *child;
+ struct device *parent;
+ unsigned int i;
+ int len;
+ int err;
+
+ parent = firewall_controller->dev;
+
+ dev_dbg(parent, "Populating %s system bus\n", dev_name(firewall_controller->dev));
+
+ for_each_available_child_of_node(dev_of_node(parent), child) {
+ /* The access-controllers property is mandatory for firewall bus devices */
+ len = of_count_phandle_with_args(child, "access-controllers",
+ "#access-controller-cells");
+ if (len <= 0) {
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ firewalls = kcalloc(len, sizeof(*firewalls), GFP_KERNEL);
+ if (!firewalls) {
+ of_node_put(child);
+ return -ENOMEM;
+ }
+
+ err = stm32_firewall_get_firewall(child, firewalls, (unsigned int)len);
+ if (err) {
+ kfree(firewalls);
+ of_node_put(child);
+ return err;
+ }
+
+ for (i = 0; i < len; i++) {
+ if (firewall_controller->grant_access(firewall_controller,
+ firewalls[i].firewall_id)) {
+ /*
+ * Peripheral access not allowed or not defined.
+ * Mark the node as populated so platform bus won't probe it
+ */
+ of_detach_node(child);
+ dev_err(parent, "%s: Device driver will not be probed\n",
+ child->full_name);
+ }
+ }
+
+ kfree(firewalls);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_firewall_populate_bus);
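
For orientation, here is a minimal consumer-side sketch of the API added above, assuming a device node that lists a single "access-controllers" phandle; the function name, error handling and the include path are illustrative assumptions, not part of this patch:

/* Illustrative sketch only, not part of the patch. */
#include <linux/bus/stm32_firewall_device.h>	/* assumed consumer-facing header */

static int example_check_firewall(struct device_node *np)
{
	struct stm32_firewall fw;
	int err;

	/* Fetch the first "access-controllers" entry of the node. */
	err = stm32_firewall_get_firewall(np, &fw, 1);
	if (err)
		return err;

	/* Ask the firewall controller whether access is allowed. */
	err = stm32_firewall_grant_access(&fw);
	if (err)
		return err;

	/* ... access the peripheral ... */

	stm32_firewall_release_access(&fw);

	return 0;
}
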
diff --git a/drivers/bus/stm32_firewall.h b/drivers/bus/stm32_firewall.h
new file mode 100644
index 000000000000..e5fac85fe346
--- /dev/null
+++ b/drivers/bus/stm32_firewall.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023, STMicroelectronics - All Rights Reserved
+ */
+
+#ifndef _STM32_FIREWALL_H
+#define _STM32_FIREWALL_H
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+/**
+ * STM32_PERIPHERAL_FIREWALL: This type of firewall protects peripherals
+ * STM32_MEMORY_FIREWALL: This type of firewall protects memories/subsets of memory
+ * zones
+ * STM32_NOTYPE_FIREWALL: Undefined firewall type
+ */
+
+#define STM32_PERIPHERAL_FIREWALL BIT(1)
+#define STM32_MEMORY_FIREWALL BIT(2)
+#define STM32_NOTYPE_FIREWALL BIT(3)
+
+/**
+ * struct stm32_firewall_controller - Information on firewall controller supplying services
+ *
+ * @name: Name of the firewall controller
+ * @dev: Device reference of the firewall controller
+ * @mmio: Base address of the firewall controller
+ * @entry: List entry of the firewall controller list
+ * @type: Type of firewall
+ * @max_entries: Number of entries covered by the firewall
+ * @grant_access: Callback used to grant a device access to a resource protected by
+ * the firewall controller
+ * @release_access: Callback used to release resources taken by a device when access was
+ * granted
+ * @grant_memory_range_access: Callback used to grant access for a device to a given memory region
+ */
+struct stm32_firewall_controller {
+ const char *name;
+ struct device *dev;
+ void __iomem *mmio;
+ struct list_head entry;
+ unsigned int type;
+ unsigned int max_entries;
+
+ int (*grant_access)(struct stm32_firewall_controller *ctrl, u32 id);
+ void (*release_access)(struct stm32_firewall_controller *ctrl, u32 id);
+ int (*grant_memory_range_access)(struct stm32_firewall_controller *ctrl, phys_addr_t paddr,
+ size_t size);
+};
+
+/**
+ * stm32_firewall_controller_register - Register a firewall controller to the STM32 firewall
+ * framework
+ * @firewall_controller: Firewall controller to register
+ *
+ * Returns 0 in case of success or -ENODEV if no controller was given.
+ */
+int stm32_firewall_controller_register(struct stm32_firewall_controller *firewall_controller);
+
+/**
+ * stm32_firewall_controller_unregister - Unregister a firewall controller from the STM32
+ * firewall framework
+ * @firewall_controller: Firewall controller to unregister
+ */
+void stm32_firewall_controller_unregister(struct stm32_firewall_controller *firewall_controller);
+
+/**
+ * stm32_firewall_populate_bus - Populate device tree nodes that have a correct firewall
+ * configuration. This is used at boot time only, as a sanity check
+ * between the device tree and the firewall hardware configuration to
+ * prevent a kernel crash when a device driver is not granted access
+ *
+ * @firewall_controller: Firewall controller which nodes will be populated or not
+ *
+ * Returns 0 in case of success or appropriate errno code if error occurred.
+ */
+int stm32_firewall_populate_bus(struct stm32_firewall_controller *firewall_controller);
+
+#endif /* _STM32_FIREWALL_H */
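
To illustrate the provider side of this header, a stripped-down registration sketch follows; the callback bodies and values are placeholders, and the RIFSC driver below is the actual reference implementation:

/* Illustrative sketch only; see stm32_rifsc.c below for a real controller. */
static int demo_grant_access(struct stm32_firewall_controller *ctrl, u32 id)
{
	/* A real controller checks its hardware configuration here. */
	return id < ctrl->max_entries ? 0 : -EACCES;
}

static void demo_release_access(struct stm32_firewall_controller *ctrl, u32 id)
{
	/* Undo whatever demo_grant_access() acquired, if anything. */
}

static int demo_probe(struct platform_device *pdev)
{
	struct stm32_firewall_controller *ctrl;

	ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	ctrl->name = dev_driver_string(&pdev->dev);
	ctrl->dev = &pdev->dev;
	ctrl->type = STM32_PERIPHERAL_FIREWALL;
	ctrl->max_entries = 16;				/* placeholder */
	ctrl->grant_access = demo_grant_access;
	ctrl->release_access = demo_release_access;

	return stm32_firewall_controller_register(ctrl);
}
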
diff --git a/drivers/bus/stm32_rifsc.c b/drivers/bus/stm32_rifsc.c
new file mode 100644
index 000000000000..4cf1b60014b7
--- /dev/null
+++ b/drivers/bus/stm32_rifsc.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023, STMicroelectronics - All Rights Reserved
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include "stm32_firewall.h"
+
+/*
+ * RIFSC offset register
+ */
+#define RIFSC_RISC_SECCFGR0 0x10
+#define RIFSC_RISC_PRIVCFGR0 0x30
+#define RIFSC_RISC_PER0_CIDCFGR 0x100
+#define RIFSC_RISC_PER0_SEMCR 0x104
+#define RIFSC_RISC_HWCFGR2 0xFEC
+
+/*
+ * SEMCR register
+ */
+#define SEMCR_MUTEX BIT(0)
+
+/*
+ * HWCFGR2 register
+ */
+#define HWCFGR2_CONF1_MASK GENMASK(15, 0)
+#define HWCFGR2_CONF2_MASK GENMASK(23, 16)
+#define HWCFGR2_CONF3_MASK GENMASK(31, 24)
+
+/*
+ * RIFSC miscellaneous
+ */
+#define RIFSC_RISC_CFEN_MASK BIT(0)
+#define RIFSC_RISC_SEM_EN_MASK BIT(1)
+#define RIFSC_RISC_SCID_MASK GENMASK(6, 4)
+#define RIFSC_RISC_SEML_SHIFT 16
+#define RIFSC_RISC_SEMWL_MASK GENMASK(23, 16)
+#define RIFSC_RISC_PER_ID_MASK GENMASK(31, 24)
+
+#define RIFSC_RISC_PERx_CID_MASK (RIFSC_RISC_CFEN_MASK | \
+ RIFSC_RISC_SEM_EN_MASK | \
+ RIFSC_RISC_SCID_MASK | \
+ RIFSC_RISC_SEMWL_MASK)
+
+#define IDS_PER_RISC_SEC_PRIV_REGS 32
+
+/* RIF miscellaneous */
+/*
+ * CIDCFGR register fields
+ */
+#define CIDCFGR_CFEN BIT(0)
+#define CIDCFGR_SEMEN BIT(1)
+#define CIDCFGR_SEMWL(x) BIT(RIFSC_RISC_SEML_SHIFT + (x))
+
+#define SEMWL_SHIFT 16
+
+/* Compartment IDs */
+#define RIF_CID0 0x0
+#define RIF_CID1 0x1
+
+static bool stm32_rifsc_is_semaphore_available(void __iomem *addr)
+{
+ return !(readl(addr) & SEMCR_MUTEX);
+}
+
+static int stm32_rif_acquire_semaphore(struct stm32_firewall_controller *stm32_firewall_controller,
+ int id)
+{
+ void __iomem *addr = stm32_firewall_controller->mmio + RIFSC_RISC_PER0_SEMCR + 0x8 * id;
+
+ writel(SEMCR_MUTEX, addr);
+
+ /* Check that CID1 has the semaphore */
+ if (stm32_rifsc_is_semaphore_available(addr) ||
+ FIELD_GET(RIFSC_RISC_SCID_MASK, readl(addr)) != RIF_CID1)
+ return -EACCES;
+
+ return 0;
+}
+
+static void stm32_rif_release_semaphore(struct stm32_firewall_controller *stm32_firewall_controller,
+ int id)
+{
+ void __iomem *addr = stm32_firewall_controller->mmio + RIFSC_RISC_PER0_SEMCR + 0x8 * id;
+
+ if (stm32_rifsc_is_semaphore_available(addr))
+ return;
+
+ writel(SEMCR_MUTEX, addr);
+
+ /* Ok if another compartment takes the semaphore before the check */
+ WARN_ON(!stm32_rifsc_is_semaphore_available(addr) &&
+ FIELD_GET(RIFSC_RISC_SCID_MASK, readl(addr)) == RIF_CID1);
+}
+
+static int stm32_rifsc_grant_access(struct stm32_firewall_controller *ctrl, u32 firewall_id)
+{
+ struct stm32_firewall_controller *rifsc_controller = ctrl;
+ u32 reg_offset, reg_id, sec_reg_value, cid_reg_value;
+ int rc;
+
+ if (firewall_id >= rifsc_controller->max_entries) {
+ dev_err(rifsc_controller->dev, "Invalid sys bus ID %u", firewall_id);
+ return -EINVAL;
+ }
+
+ /*
+ * RIFSC_RISC_PRIVCFGRx and RIFSC_RISC_SECCFGRx both handle configuration access for
+ * 32 peripherals. On the other hand, there is one RIFSC_RISC_PERx_CIDCFGR register
+ * per peripheral
+ */
+ reg_id = firewall_id / IDS_PER_RISC_SEC_PRIV_REGS;
+ reg_offset = firewall_id % IDS_PER_RISC_SEC_PRIV_REGS;
+ sec_reg_value = readl(rifsc_controller->mmio + RIFSC_RISC_SECCFGR0 + 0x4 * reg_id);
+ cid_reg_value = readl(rifsc_controller->mmio + RIFSC_RISC_PER0_CIDCFGR + 0x8 * firewall_id);
+
+ /* First check conditions for semaphore mode, which doesn't take into account static CID. */
+ if ((cid_reg_value & CIDCFGR_SEMEN) && (cid_reg_value & CIDCFGR_CFEN)) {
+ if (cid_reg_value & BIT(RIF_CID1 + SEMWL_SHIFT)) {
+ /* Static CID is irrelevant if semaphore mode */
+ goto skip_cid_check;
+ } else {
+ dev_dbg(rifsc_controller->dev,
+ "Invalid bus semaphore configuration: index %d\n", firewall_id);
+ return -EACCES;
+ }
+ }
+
+ /*
+ * Skip CID check if CID filtering isn't enabled or filtering is enabled on CID0, which
+ * corresponds to whatever CID.
+ */
+ if (!(cid_reg_value & CIDCFGR_CFEN) ||
+ FIELD_GET(RIFSC_RISC_SCID_MASK, cid_reg_value) == RIF_CID0)
+ goto skip_cid_check;
+
+ /* Coherency check with the CID configuration */
+ if (FIELD_GET(RIFSC_RISC_SCID_MASK, cid_reg_value) != RIF_CID1) {
+ dev_dbg(rifsc_controller->dev, "Invalid CID configuration for peripheral: %d\n",
+ firewall_id);
+ return -EACCES;
+ }
+
+skip_cid_check:
+ /* Check security configuration */
+ if (sec_reg_value & BIT(reg_offset)) {
+ dev_dbg(rifsc_controller->dev,
+ "Invalid security configuration for peripheral: %d\n", firewall_id);
+ return -EACCES;
+ }
+
+ /*
+ * If the peripheral is in semaphore mode, take the semaphore so that
+ * the CID1 has the ownership.
+ */
+ if ((cid_reg_value & CIDCFGR_SEMEN) && (cid_reg_value & CIDCFGR_CFEN)) {
+ rc = stm32_rif_acquire_semaphore(rifsc_controller, firewall_id);
+ if (rc) {
+ dev_err(rifsc_controller->dev,
+ "Couldn't acquire semaphore for peripheral: %d\n", firewall_id);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static void stm32_rifsc_release_access(struct stm32_firewall_controller *ctrl, u32 firewall_id)
+{
+ stm32_rif_release_semaphore(ctrl, firewall_id);
+}
+
+static int stm32_rifsc_probe(struct platform_device *pdev)
+{
+ struct stm32_firewall_controller *rifsc_controller;
+ struct device_node *np = pdev->dev.of_node;
+ u32 hwcfgr, nb_risup, nb_rimu, nb_risal;
+ struct resource *res;
+ void __iomem *mmio;
+ int rc;
+
+ rifsc_controller = devm_kzalloc(&pdev->dev, sizeof(*rifsc_controller), GFP_KERNEL);
+ if (!rifsc_controller)
+ return -ENOMEM;
+
+ mmio = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(mmio))
+ return PTR_ERR(mmio);
+
+ rifsc_controller->dev = &pdev->dev;
+ rifsc_controller->mmio = mmio;
+ rifsc_controller->name = dev_driver_string(rifsc_controller->dev);
+ rifsc_controller->type = STM32_PERIPHERAL_FIREWALL | STM32_MEMORY_FIREWALL;
+ rifsc_controller->grant_access = stm32_rifsc_grant_access;
+ rifsc_controller->release_access = stm32_rifsc_release_access;
+
+ /* Get the number of RIFSC entries from the HWCFGR2 configuration fields */
+ hwcfgr = readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2);
+ nb_risup = FIELD_GET(HWCFGR2_CONF1_MASK, hwcfgr);
+ nb_rimu = FIELD_GET(HWCFGR2_CONF2_MASK, hwcfgr);
+ nb_risal = FIELD_GET(HWCFGR2_CONF3_MASK, hwcfgr);
+ rifsc_controller->max_entries = nb_risup + nb_rimu + nb_risal;
+
+ platform_set_drvdata(pdev, rifsc_controller);
+
+ rc = stm32_firewall_controller_register(rifsc_controller);
+ if (rc) {
+ dev_err(rifsc_controller->dev, "Couldn't register as a firewall controller: %d",
+ rc);
+ return rc;
+ }
+
+ rc = stm32_firewall_populate_bus(rifsc_controller);
+ if (rc) {
+ dev_err(rifsc_controller->dev, "Couldn't populate RIFSC bus: %d",
+ rc);
+ return rc;
+ }
+
+ /* Populate all allowed nodes */
+ return of_platform_populate(np, NULL, NULL, &pdev->dev);
+}
+
+static const struct of_device_id stm32_rifsc_of_match[] = {
+ { .compatible = "st,stm32mp25-rifsc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, stm32_rifsc_of_match);
+
+static struct platform_driver stm32_rifsc_driver = {
+ .probe = stm32_rifsc_probe,
+ .driver = {
+ .name = "stm32-rifsc",
+ .of_match_table = stm32_rifsc_of_match,
+ },
+};
+module_platform_driver(stm32_rifsc_driver);
+
+MODULE_AUTHOR("Gatien Chevallier <gatien.chevallier@foss.st.com>");
+MODULE_DESCRIPTION("STMicroelectronics RIFSC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/bus/sun50i-de2.c b/drivers/bus/sun50i-de2.c
index 3339311ce068..dfe588179aca 100644
--- a/drivers/bus/sun50i-de2.c
+++ b/drivers/bus/sun50i-de2.c
@@ -36,7 +36,7 @@ static const struct of_device_id sun50i_de2_bus_of_match[] = {
static struct platform_driver sun50i_de2_bus_driver = {
.probe = sun50i_de2_bus_probe,
- .remove_new = sun50i_de2_bus_remove,
+ .remove = sun50i_de2_bus_remove,
.driver = {
.name = "sun50i-de2-bus",
.of_match_table = sun50i_de2_bus_of_match,
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index fd3e9d82340a..7a33c3b31d1e 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -128,9 +128,9 @@ struct sunxi_rsb {
};
/* bus / slave device related functions */
-static struct bus_type sunxi_rsb_bus;
+static const struct bus_type sunxi_rsb_bus;
-static int sunxi_rsb_device_match(struct device *dev, struct device_driver *drv)
+static int sunxi_rsb_device_match(struct device *dev, const struct device_driver *drv)
{
return of_driver_match_device(dev, drv);
}
@@ -177,7 +177,7 @@ static int sunxi_rsb_device_modalias(const struct device *dev, struct kobj_ueven
return of_device_uevent_modalias(dev, env);
}
-static struct bus_type sunxi_rsb_bus = {
+static const struct bus_type sunxi_rsb_bus = {
.name = RSB_CTRL_NAME,
.match = sunxi_rsb_device_match,
.probe = sunxi_rsb_device_probe,
@@ -457,7 +457,7 @@ static void regmap_sunxi_rsb_free_ctx(void *context)
kfree(ctx);
}
-static struct regmap_bus regmap_sunxi_rsb = {
+static const struct regmap_bus regmap_sunxi_rsb = {
.reg_write = regmap_sunxi_rsb_reg_write,
.reg_read = regmap_sunxi_rsb_reg_read,
.free_context = regmap_sunxi_rsb_free_ctx,
@@ -751,12 +751,10 @@ static int sunxi_rsb_probe(struct platform_device *pdev)
int irq, ret;
of_property_read_u32(np, "clock-frequency", &clk_freq);
- if (clk_freq > RSB_MAX_FREQ) {
- dev_err(dev,
- "clock-frequency (%u Hz) is too high (max = 20MHz)\n",
- clk_freq);
- return -EINVAL;
- }
+ if (clk_freq > RSB_MAX_FREQ)
+ return dev_err_probe(dev, -EINVAL,
+ "clock-frequency (%u Hz) is too high (max = 20MHz)\n",
+ clk_freq);
rsb = devm_kzalloc(dev, sizeof(*rsb), GFP_KERNEL);
if (!rsb)
@@ -774,28 +772,22 @@ static int sunxi_rsb_probe(struct platform_device *pdev)
return irq;
rsb->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(rsb->clk)) {
- ret = PTR_ERR(rsb->clk);
- dev_err(dev, "failed to retrieve clk: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(rsb->clk))
+ return dev_err_probe(dev, PTR_ERR(rsb->clk),
+ "failed to retrieve clk\n");
rsb->rstc = devm_reset_control_get(dev, NULL);
- if (IS_ERR(rsb->rstc)) {
- ret = PTR_ERR(rsb->rstc);
- dev_err(dev, "failed to retrieve reset controller: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(rsb->rstc))
+ return dev_err_probe(dev, PTR_ERR(rsb->rstc),
+ "failed to retrieve reset controller\n");
init_completion(&rsb->complete);
mutex_init(&rsb->lock);
ret = devm_request_irq(dev, irq, sunxi_rsb_irq, 0, RSB_CTRL_NAME, rsb);
- if (ret) {
- dev_err(dev, "can't register interrupt handler irq %d: %d\n",
- irq, ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "can't register interrupt handler irq %d\n", irq);
ret = sunxi_rsb_hw_init(rsb);
if (ret)
@@ -840,7 +832,7 @@ MODULE_DEVICE_TABLE(of, sunxi_rsb_of_match_table);
static struct platform_driver sunxi_rsb_driver = {
.probe = sunxi_rsb_probe,
- .remove_new = sunxi_rsb_remove,
+ .remove = sunxi_rsb_remove,
.driver = {
.name = RSB_CTRL_NAME,
.of_match_table = sunxi_rsb_of_match_table,
diff --git a/drivers/bus/tegra-aconnect.c b/drivers/bus/tegra-aconnect.c
index de80008bff92..90e3b0a10816 100644
--- a/drivers/bus/tegra-aconnect.c
+++ b/drivers/bus/tegra-aconnect.c
@@ -104,7 +104,7 @@ MODULE_DEVICE_TABLE(of, tegra_aconnect_of_match);
static struct platform_driver tegra_aconnect_driver = {
.probe = tegra_aconnect_probe,
- .remove_new = tegra_aconnect_remove,
+ .remove = tegra_aconnect_remove,
.driver = {
.name = "tegra-aconnect",
.of_match_table = tegra_aconnect_of_match,
diff --git a/drivers/bus/tegra-gmi.c b/drivers/bus/tegra-gmi.c
index f5d6414df9f2..9c09141961d8 100644
--- a/drivers/bus/tegra-gmi.c
+++ b/drivers/bus/tegra-gmi.c
@@ -303,7 +303,7 @@ MODULE_DEVICE_TABLE(of, tegra_gmi_id_table);
static struct platform_driver tegra_gmi_driver = {
.probe = tegra_gmi_probe,
- .remove_new = tegra_gmi_remove,
+ .remove = tegra_gmi_remove,
.driver = {
.name = "tegra-gmi",
.of_match_table = tegra_gmi_id_table,
diff --git a/drivers/bus/ti-pwmss.c b/drivers/bus/ti-pwmss.c
index 4969c556e752..1f2cab91e438 100644
--- a/drivers/bus/ti-pwmss.c
+++ b/drivers/bus/ti-pwmss.c
@@ -44,7 +44,7 @@ static struct platform_driver pwmss_driver = {
.of_match_table = pwmss_of_match,
},
.probe = pwmss_probe,
- .remove_new = pwmss_remove,
+ .remove = pwmss_remove,
};
module_platform_driver(pwmss_driver);
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 245e5e827d0d..9f624e5da991 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -1,6 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
/*
* ti-sysc.c - Texas Instruments sysc interconnect target driver
+ *
+ * TI SoCs have an interconnect target wrapper IP for many devices. The wrapper
+ * IP manages clock gating, resets, and PM capabilities for the connected devices.
+ *
+ * Copyright (C) 2017-2024 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ * Many features are based on the earlier omap_hwmod arch code with thanks to all
+ * the people who developed and debugged the code over the years:
+ *
+ * Copyright (C) 2009-2011 Nokia Corporation
+ * Copyright (C) 2011-2021 Texas Instruments Incorporated - https://www.ti.com/
*/
#include <linux/io.h>
@@ -115,7 +126,6 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = {
* @enabled: sysc runtime enabled status
* @needs_resume: runtime resume needed on resume from suspend
* @child_needs_resume: runtime resume needed for child on resume from suspend
- * @disable_on_idle: status flag used for disabling modules with resets
* @idle_work: work structure used to perform delayed idle on a module
* @pre_reset_quirk: module specific pre-reset quirk
* @post_reset_quirk: module specific post-reset quirk
@@ -667,51 +677,6 @@ static int sysc_parse_and_check_child_range(struct sysc *ddata)
return 0;
}
-/* Interconnect instances to probe before l4_per instances */
-static struct resource early_bus_ranges[] = {
- /* am3/4 l4_wkup */
- { .start = 0x44c00000, .end = 0x44c00000 + 0x300000, },
- /* omap4/5 and dra7 l4_cfg */
- { .start = 0x4a000000, .end = 0x4a000000 + 0x300000, },
- /* omap4 l4_wkup */
- { .start = 0x4a300000, .end = 0x4a300000 + 0x30000, },
- /* omap5 and dra7 l4_wkup without dra7 dcan segment */
- { .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000, },
-};
-
-static atomic_t sysc_defer = ATOMIC_INIT(10);
-
-/**
- * sysc_defer_non_critical - defer non_critical interconnect probing
- * @ddata: device driver data
- *
- * We want to probe l4_cfg and l4_wkup interconnect instances before any
- * l4_per instances as l4_per instances depend on resources on l4_cfg and
- * l4_wkup interconnects.
- */
-static int sysc_defer_non_critical(struct sysc *ddata)
-{
- struct resource *res;
- int i;
-
- if (!atomic_read(&sysc_defer))
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) {
- res = &early_bus_ranges[i];
- if (ddata->module_pa >= res->start &&
- ddata->module_pa <= res->end) {
- atomic_set(&sysc_defer, 0);
-
- return 0;
- }
- }
-
- atomic_dec_if_positive(&sysc_defer);
-
- return -EPROBE_DEFER;
-}
-
static struct device_node *stdout_path;
static void sysc_init_stdout_path(struct sysc *ddata)
@@ -937,10 +902,6 @@ static int sysc_map_and_check_registers(struct sysc *ddata)
if (error)
return error;
- error = sysc_defer_non_critical(ddata);
- if (error)
- return error;
-
sysc_check_children(ddata);
if (!of_property_present(np, "reg"))
@@ -1458,8 +1419,7 @@ static int __maybe_unused sysc_noirq_suspend(struct device *dev)
ddata = dev_get_drvdata(dev);
- if (ddata->cfg.quirks &
- (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
+ if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)
return 0;
if (!ddata->enabled)
@@ -1477,8 +1437,7 @@ static int __maybe_unused sysc_noirq_resume(struct device *dev)
ddata = dev_get_drvdata(dev);
- if (ddata->cfg.quirks &
- (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
+ if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)
return 0;
if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_RESUME) {
@@ -1529,19 +1488,6 @@ struct sysc_revision_quirk {
}
static const struct sysc_revision_quirk sysc_revision_quirks[] = {
- /* These drivers need to be fixed to not use pm_runtime_irq_safe() */
- SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
- SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
- /* Uarts on omap4 and later */
- SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
- SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
- SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
-
/* Quirks that need to be set based on the module address */
SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT |
@@ -1599,6 +1545,17 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("sata", 0, 0xfc, 0x1100, -ENODEV, 0x5e412000, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE_ACT),
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE_ACT),
+ /* Uarts on omap4 and later */
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
+ SYSC_QUIRK_SWSUP_SIDLE_ACT),
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE_ACT),
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE_ACT),
SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff,
@@ -2030,6 +1987,21 @@ static void sysc_module_disable_quirk_pruss(struct sysc *ddata)
sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
}
+static void sysc_module_enable_quirk_pruss(struct sysc *ddata)
+{
+ u32 reg;
+
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ /*
+ * Clearing the SYSC_PRUSS_STANDBY_INIT bit - Updates OCP master
+ * port configuration to enable memory access outside of the
+ * PRU-ICSS subsystem.
+ */
+ reg &= (~SYSC_PRUSS_STANDBY_INIT);
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+}
+
static void sysc_init_module_quirks(struct sysc *ddata)
{
if (ddata->legacy_mode || !ddata->name)
@@ -2082,8 +2054,10 @@ static void sysc_init_module_quirks(struct sysc *ddata)
ddata->module_disable_quirk = sysc_reset_done_quirk_wdt;
}
- if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS)
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS) {
+ ddata->module_enable_quirk = sysc_module_enable_quirk_pruss;
ddata->module_disable_quirk = sysc_module_disable_quirk_pruss;
+ }
}
static int sysc_clockdomain_init(struct sysc *ddata)
@@ -2145,8 +2119,7 @@ static int sysc_reset(struct sysc *ddata)
sysc_offset = ddata->offsets[SYSC_SYSCONFIG];
if (ddata->legacy_mode ||
- ddata->cap->regbits->srst_shift < 0 ||
- ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
+ ddata->cap->regbits->srst_shift < 0)
return 0;
sysc_mask = BIT(ddata->cap->regbits->srst_shift);
@@ -2240,12 +2213,14 @@ static int sysc_init_module(struct sysc *ddata)
goto err_main_clocks;
}
- error = sysc_reset(ddata);
- if (error)
- dev_err(ddata->dev, "Reset failed with %d\n", error);
+ if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)) {
+ error = sysc_reset(ddata);
+ if (error)
+ dev_err(ddata->dev, "Reset failed with %d\n", error);
- if (error && !ddata->legacy_mode)
- sysc_disable_module(ddata->dev);
+ if (error && !ddata->legacy_mode)
+ sysc_disable_module(ddata->dev);
+ }
err_main_clocks:
if (error)
@@ -2283,11 +2258,9 @@ static int sysc_init_idlemode(struct sysc *ddata, u8 *idlemodes,
const char *name)
{
struct device_node *np = ddata->dev->of_node;
- struct property *prop;
- const __be32 *p;
u32 val;
- of_property_for_each_u32(np, name, prop, p, val) {
+ of_property_for_each_u32(np, name, val) {
if (val >= SYSC_NR_IDLEMODES) {
dev_err(ddata->dev, "invalid idlemode: %i\n", val);
return -EINVAL;
@@ -2400,7 +2373,7 @@ static int sysc_child_add_clocks(struct sysc *ddata,
return 0;
}
-static struct device_type sysc_device_type = {
+static const struct device_type sysc_device_type = {
};
static struct sysc *sysc_child_to_parent(struct device *dev)
@@ -2447,89 +2420,6 @@ static int __maybe_unused sysc_child_runtime_resume(struct device *dev)
return pm_generic_runtime_resume(dev);
}
-#ifdef CONFIG_PM_SLEEP
-static int sysc_child_suspend_noirq(struct device *dev)
-{
- struct sysc *ddata;
- int error;
-
- ddata = sysc_child_to_parent(dev);
-
- dev_dbg(ddata->dev, "%s %s\n", __func__,
- ddata->name ? ddata->name : "");
-
- error = pm_generic_suspend_noirq(dev);
- if (error) {
- dev_err(dev, "%s error at %i: %i\n",
- __func__, __LINE__, error);
-
- return error;
- }
-
- if (!pm_runtime_status_suspended(dev)) {
- error = pm_generic_runtime_suspend(dev);
- if (error) {
- dev_dbg(dev, "%s busy at %i: %i\n",
- __func__, __LINE__, error);
-
- return 0;
- }
-
- error = sysc_runtime_suspend(ddata->dev);
- if (error) {
- dev_err(dev, "%s error at %i: %i\n",
- __func__, __LINE__, error);
-
- return error;
- }
-
- ddata->child_needs_resume = true;
- }
-
- return 0;
-}
-
-static int sysc_child_resume_noirq(struct device *dev)
-{
- struct sysc *ddata;
- int error;
-
- ddata = sysc_child_to_parent(dev);
-
- dev_dbg(ddata->dev, "%s %s\n", __func__,
- ddata->name ? ddata->name : "");
-
- if (ddata->child_needs_resume) {
- ddata->child_needs_resume = false;
-
- error = sysc_runtime_resume(ddata->dev);
- if (error)
- dev_err(ddata->dev,
- "%s runtime resume error: %i\n",
- __func__, error);
-
- error = pm_generic_runtime_resume(dev);
- if (error)
- dev_err(ddata->dev,
- "%s generic runtime resume: %i\n",
- __func__, error);
- }
-
- return pm_generic_resume_noirq(dev);
-}
-#endif
-
-static struct dev_pm_domain sysc_child_pm_domain = {
- .ops = {
- SET_RUNTIME_PM_OPS(sysc_child_runtime_suspend,
- sysc_child_runtime_resume,
- NULL)
- USE_PLATFORM_PM_SLEEP_OPS
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_child_suspend_noirq,
- sysc_child_resume_noirq)
- }
-};
-
/* Caller needs to take list_lock if ever used outside of cpu_pm */
static void sysc_reinit_modules(struct sysc_soc_info *soc)
{
@@ -2600,25 +2490,6 @@ out_unlock:
mutex_unlock(&sysc_soc->list_lock);
}
-/**
- * sysc_legacy_idle_quirk - handle children in omap_device compatible way
- * @ddata: device driver data
- * @child: child device driver
- *
- * Allow idle for child devices as done with _od_runtime_suspend().
- * Otherwise many child devices will not idle because of the permanent
- * parent usecount set in pm_runtime_irq_safe().
- *
- * Note that the long term solution is to just modify the child device
- * drivers to not set pm_runtime_irq_safe() and then this can be just
- * dropped.
- */
-static void sysc_legacy_idle_quirk(struct sysc *ddata, struct device *child)
-{
- if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE)
- dev_pm_domain_set(child, &sysc_child_pm_domain);
-}
-
static int sysc_notifier_call(struct notifier_block *nb,
unsigned long event, void *device)
{
@@ -2635,7 +2506,6 @@ static int sysc_notifier_call(struct notifier_block *nb,
error = sysc_child_add_clocks(ddata, dev);
if (error)
return error;
- sysc_legacy_idle_quirk(ddata, dev);
break;
default:
break;
@@ -2666,14 +2536,12 @@ static const struct sysc_dts_quirk sysc_dts_quirks[] = {
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
bool is_child)
{
- const struct property *prop;
- int i, len;
+ int i;
for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) {
const char *name = sysc_dts_quirks[i].name;
- prop = of_get_property(np, name, &len);
- if (!prop)
+ if (!of_property_present(np, name))
continue;
ddata->cfg.quirks |= sysc_dts_quirks[i].mask;
@@ -2859,8 +2727,7 @@ static const struct sysc_capabilities sysc_34xx_sr = {
.type = TI_SYSC_OMAP34XX_SR,
.sysc_mask = SYSC_OMAP2_CLOCKACTIVITY,
.regbits = &sysc_regbits_omap34xx_sr,
- .mod_quirks = SYSC_QUIRK_USE_CLOCKACT | SYSC_QUIRK_UNCACHED |
- SYSC_QUIRK_LEGACY_IDLE,
+ .mod_quirks = SYSC_QUIRK_USE_CLOCKACT | SYSC_QUIRK_UNCACHED,
};
/*
@@ -2881,13 +2748,12 @@ static const struct sysc_capabilities sysc_36xx_sr = {
.type = TI_SYSC_OMAP36XX_SR,
.sysc_mask = SYSC_OMAP3_SR_ENAWAKEUP,
.regbits = &sysc_regbits_omap36xx_sr,
- .mod_quirks = SYSC_QUIRK_UNCACHED | SYSC_QUIRK_LEGACY_IDLE,
+ .mod_quirks = SYSC_QUIRK_UNCACHED,
};
static const struct sysc_capabilities sysc_omap4_sr = {
.type = TI_SYSC_OMAP4_SR,
.regbits = &sysc_regbits_omap36xx_sr,
- .mod_quirks = SYSC_QUIRK_LEGACY_IDLE,
};
/*
@@ -3447,7 +3313,7 @@ MODULE_DEVICE_TABLE(of, sysc_match);
static struct platform_driver sysc_driver = {
.probe = sysc_probe,
- .remove_new = sysc_remove,
+ .remove = sysc_remove,
.driver = {
.name = "ti-sysc",
.of_match_table = sysc_match,
diff --git a/drivers/bus/ts-nbus.c b/drivers/bus/ts-nbus.c
index 4fa932cb0915..2328c48b9b12 100644
--- a/drivers/bus/ts-nbus.c
+++ b/drivers/bus/ts-nbus.c
@@ -39,45 +39,39 @@ struct ts_nbus {
/*
* request all gpios required by the bus.
*/
-static int ts_nbus_init_pdata(struct platform_device *pdev, struct ts_nbus
- *ts_nbus)
+static int ts_nbus_init_pdata(struct platform_device *pdev,
+ struct ts_nbus *ts_nbus)
{
ts_nbus->data = devm_gpiod_get_array(&pdev->dev, "ts,data",
GPIOD_OUT_HIGH);
- if (IS_ERR(ts_nbus->data)) {
- dev_err(&pdev->dev, "failed to retrieve ts,data-gpio from dts\n");
- return PTR_ERR(ts_nbus->data);
- }
+ if (IS_ERR(ts_nbus->data))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ts_nbus->data),
+ "failed to retrieve ts,data-gpio from dts\n");
ts_nbus->csn = devm_gpiod_get(&pdev->dev, "ts,csn", GPIOD_OUT_HIGH);
- if (IS_ERR(ts_nbus->csn)) {
- dev_err(&pdev->dev, "failed to retrieve ts,csn-gpio from dts\n");
- return PTR_ERR(ts_nbus->csn);
- }
+ if (IS_ERR(ts_nbus->csn))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ts_nbus->csn),
+ "failed to retrieve ts,csn-gpio from dts\n");
ts_nbus->txrx = devm_gpiod_get(&pdev->dev, "ts,txrx", GPIOD_OUT_HIGH);
- if (IS_ERR(ts_nbus->txrx)) {
- dev_err(&pdev->dev, "failed to retrieve ts,txrx-gpio from dts\n");
- return PTR_ERR(ts_nbus->txrx);
- }
+ if (IS_ERR(ts_nbus->txrx))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ts_nbus->txrx),
+ "failed to retrieve ts,txrx-gpio from dts\n");
ts_nbus->strobe = devm_gpiod_get(&pdev->dev, "ts,strobe", GPIOD_OUT_HIGH);
- if (IS_ERR(ts_nbus->strobe)) {
- dev_err(&pdev->dev, "failed to retrieve ts,strobe-gpio from dts\n");
- return PTR_ERR(ts_nbus->strobe);
- }
+ if (IS_ERR(ts_nbus->strobe))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ts_nbus->strobe),
+ "failed to retrieve ts,strobe-gpio from dts\n");
ts_nbus->ale = devm_gpiod_get(&pdev->dev, "ts,ale", GPIOD_OUT_HIGH);
- if (IS_ERR(ts_nbus->ale)) {
- dev_err(&pdev->dev, "failed to retrieve ts,ale-gpio from dts\n");
- return PTR_ERR(ts_nbus->ale);
- }
+ if (IS_ERR(ts_nbus->ale))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ts_nbus->ale),
+ "failed to retrieve ts,ale-gpio from dts\n");
ts_nbus->rdy = devm_gpiod_get(&pdev->dev, "ts,rdy", GPIOD_IN);
- if (IS_ERR(ts_nbus->rdy)) {
- dev_err(&pdev->dev, "failed to retrieve ts,rdy-gpio from dts\n");
- return PTR_ERR(ts_nbus->rdy);
- }
+ if (IS_ERR(ts_nbus->rdy))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ts_nbus->rdy),
+ "failed to retrieve ts,rdy-gpio from dts\n");
return 0;
}
@@ -273,7 +267,7 @@ EXPORT_SYMBOL_GPL(ts_nbus_write);
static int ts_nbus_probe(struct platform_device *pdev)
{
struct pwm_device *pwm;
- struct pwm_args pargs;
+ struct pwm_state state;
struct device *dev = &pdev->dev;
struct ts_nbus *ts_nbus;
int ret;
@@ -289,32 +283,24 @@ static int ts_nbus_probe(struct platform_device *pdev)
return ret;
pwm = devm_pwm_get(dev, NULL);
- if (IS_ERR(pwm)) {
- ret = PTR_ERR(pwm);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "unable to request PWM\n");
- return ret;
- }
+ if (IS_ERR(pwm))
+ return dev_err_probe(dev, PTR_ERR(pwm),
+ "unable to request PWM\n");
- pwm_get_args(pwm, &pargs);
- if (!pargs.period) {
- dev_err(&pdev->dev, "invalid PWM period\n");
- return -EINVAL;
- }
+ pwm_init_state(pwm, &state);
+ if (!state.period)
+ return dev_err_probe(dev, -EINVAL, "invalid PWM period\n");
- /*
- * FIXME: pwm_apply_args() should be removed when switching to
- * the atomic PWM API.
- */
- pwm_apply_args(pwm);
- ret = pwm_config(pwm, pargs.period, pargs.period);
+ state.duty_cycle = state.period;
+ state.enabled = true;
+
+ ret = pwm_apply_might_sleep(pwm, &state);
if (ret < 0)
- return ret;
+ return dev_err_probe(dev, ret, "failed to configure PWM\n");
/*
* we can now start the FPGA and populate the peripherals.
*/
- pwm_enable(pwm);
ts_nbus->pwm = pwm;
/*
@@ -324,7 +310,8 @@ static int ts_nbus_probe(struct platform_device *pdev)
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret < 0)
- return ret;
+ return dev_err_probe(dev, ret,
+ "failed to populate platform devices on bus\n");
dev_info(dev, "initialized\n");
@@ -349,7 +336,7 @@ MODULE_DEVICE_TABLE(of, ts_nbus_of_match);
static struct platform_driver ts_nbus_driver = {
.probe = ts_nbus_probe,
- .remove_new = ts_nbus_remove,
+ .remove = ts_nbus_remove,
.driver = {
.name = "ts_nbus",
.of_match_table = ts_nbus_of_match,
diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c
index d2c7ada90186..64ee920721ee 100644
--- a/drivers/bus/vexpress-config.c
+++ b/drivers/bus/vexpress-config.c
@@ -414,4 +414,5 @@ static struct platform_driver vexpress_syscfg_driver = {
.probe = vexpress_syscfg_probe,
};
module_platform_driver(vexpress_syscfg_driver);
+MODULE_DESCRIPTION("Versatile Express configuration bus");
MODULE_LICENSE("GPL v2");