Diffstat (limited to 'drivers/bus')
56 files changed, 3885 insertions, 1492 deletions
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 7bfe998f3514..fe7600283e70 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -31,7 +31,7 @@ config ARM_INTEGRATOR_LM
 config BRCMSTB_GISB_ARB
 	tristate "Broadcom STB GISB bus arbiter"
-	depends on ARM || ARM64 || MIPS
+	depends on ARCH_BRCMSTB || BMIPS_GENERIC
 	default ARCH_BRCMSTB || BMIPS_GENERIC
 	help
 	  Driver for the Broadcom Set Top Box System-on-a-chip internal bus
@@ -81,15 +81,21 @@ config MOXTET
 config HISILICON_LPC
 	bool "Support for ISA I/O space on HiSilicon Hip06/7"
 	depends on (ARM64 && ARCH_HISI) || (COMPILE_TEST && !ALPHA && !HEXAGON && !PARISC)
-	depends on HAS_IOMEM
+	depends on HAS_IOPORT
 	select INDIRECT_PIO if ARM64
 	help
 	  Driver to enable I/O access to devices attached to the Low Pin
 	  Count bus on the HiSilicon Hip06/7 SoC.
 
+config IMX_AIPSTZ
+	tristate "Support for IMX Secure AHB to IP Slave bus (AIPSTZ) bridge"
+	depends on ARCH_MXC
+	help
+	  Enable support for IMX AIPSTZ bridge.
+
 config IMX_WEIM
 	bool "Freescale EIM DRIVER"
-	depends on ARCH_MXC
+	depends on ARCH_MXC || COMPILE_TEST
 	help
 	  Driver for i.MX WEIM controller.
 	  The WEIM(Wireless External Interface Module) works like a bus.
@@ -163,6 +169,16 @@ config QCOM_SSC_BLOCK_BUS
 	  i2c/spi/uart controllers, a hexagon core, and a clock controller
 	  which provides clocks for the above.
 
+config STM32_FIREWALL
+	bool "STM32 Firewall framework"
+	depends on (ARCH_STM32 || COMPILE_TEST) && OF
+	select OF_DYNAMIC
+	help
+	  Say y to enable STM32 firewall framework and its services. Firewall
+	  controllers will be able to register to the framework. Access for
+	  hardware resources linked to a firewall controller can be requested
+	  through this STM32 framework.
+
 config SUN50I_DE2_BUS
 	bool "Allwinner A64 DE2 Bus Driver"
 	default ARM64
@@ -186,11 +202,12 @@ config SUNXI_RSB
 config TEGRA_ACONNECT
 	tristate "Tegra ACONNECT Bus Driver"
-	depends on ARCH_TEGRA_210_SOC
+	depends on ARCH_TEGRA
 	depends on OF && PM
 	help
 	  Driver for the Tegra ACONNECT bus which is used to interface with
-	  the devices inside the Audio Processing Engine (APE) for Tegra210.
+	  the devices inside the Audio Processing Engine (APE) for
+	  Tegra210 and later.
 
 config TEGRA_GMI
 	tristate "Tegra Generic Memory Interface bus driver"
@@ -210,7 +227,8 @@ config TI_PWMSS
 config TI_SYSC
 	bool "TI sysc interconnect target module driver"
-	depends on ARCH_OMAP2PLUS
+	depends on ARCH_OMAP2PLUS || ARCH_K3
+	default y
 	help
 	  Generic driver for Texas Instruments interconnect target module
 	  found on many TI SoCs.
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index d90eed189a65..8e693fe8a03a 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_FSL_MC_BUS)	+= fsl-mc/
 obj-$(CONFIG_BT1_APB)		+= bt1-apb.o
 obj-$(CONFIG_BT1_AXI)		+= bt1-axi.o
+obj-$(CONFIG_IMX_AIPSTZ)	+= imx-aipstz.o
 obj-$(CONFIG_IMX_WEIM)		+= imx-weim.o
 obj-$(CONFIG_INTEL_IXP4XX_EB)	+= intel-ixp4xx-eb.o
 obj-$(CONFIG_MIPS_CDMM)		+= mips_cdmm.o
@@ -26,6 +27,7 @@ obj-$(CONFIG_OMAP_INTERCONNECT)	+= omap_l3_smx.o omap_l3_noc.o
 obj-$(CONFIG_OMAP_OCP2SCP)	+= omap-ocp2scp.o
 obj-$(CONFIG_QCOM_EBI2)		+= qcom-ebi2.o
 obj-$(CONFIG_QCOM_SSC_BLOCK_BUS)	+= qcom-ssc-block-bus.o
+obj-$(CONFIG_STM32_FIREWALL)	+= stm32_firewall.o stm32_rifsc.o stm32_etzpc.o
 obj-$(CONFIG_SUN50I_DE2_BUS)	+= sun50i-de2.o
 obj-$(CONFIG_SUNXI_RSB)		+= sunxi-rsb.o
 obj-$(CONFIG_OF)		+= simple-pm-bus.o
diff --git a/drivers/bus/arm-integrator-lm.c b/drivers/bus/arm-integrator-lm.c
index 2344d560b144..a65c79b08804 100644
--- a/drivers/bus/arm-integrator-lm.c
+++ b/drivers/bus/arm-integrator-lm.c
@@ -85,6 +85,7 @@ static int integrator_ap_lm_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 	map = syscon_node_to_regmap(syscon);
+	of_node_put(syscon);
 	if (IS_ERR(map)) {
 		dev_err(dev,
 			"could not find Integrator/AP system controller\n");
@@ -126,4 +127,3 @@ static struct platform_driver integrator_ap_lm_driver = {
 module_platform_driver(integrator_ap_lm_driver);
 MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
 MODULE_DESCRIPTION("Integrator AP Logical Module driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index b0c3704777e9..91ef99c42344 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -96,6 +96,20 @@ static const int gisb_offsets_bcm7400[] = {
 	[ARB_ERR_CAP_MASTER]	= 0x0d8,
 };
 
+static const int gisb_offsets_bcm74165[] = {
+	[ARB_TIMER]		= 0x008,
+	[ARB_BP_CAP_CLR]	= 0x044,
+	[ARB_BP_CAP_HI_ADDR]	= -1,
+	[ARB_BP_CAP_ADDR]	= 0x048,
+	[ARB_BP_CAP_STATUS]	= 0x058,
+	[ARB_BP_CAP_MASTER]	= 0x05c,
+	[ARB_ERR_CAP_CLR]	= 0x038,
+	[ARB_ERR_CAP_HI_ADDR]	= -1,
+	[ARB_ERR_CAP_ADDR]	= 0x020,
+	[ARB_ERR_CAP_STATUS]	= 0x030,
+	[ARB_ERR_CAP_MASTER]	= 0x034,
+};
+
 static const int gisb_offsets_bcm7435[] = {
 	[ARB_TIMER]		= 0x00c,
 	[ARB_BP_CAP_CLR]	= 0x014,
@@ -381,10 +395,7 @@ static struct attribute *gisb_arb_sysfs_attrs[] = {
 	&dev_attr_gisb_arb_timeout.attr,
 	NULL,
 };
-
-static struct attribute_group gisb_arb_sysfs_attr_group = {
-	.attrs = gisb_arb_sysfs_attrs,
-};
+ATTRIBUTE_GROUPS(gisb_arb_sysfs);
 
 static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
 	{ .compatible = "brcm,gisb-arb",         .data = gisb_offsets_bcm7445 },
@@ -393,20 +404,20 @@ static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
 	{ .compatible = "brcm,bcm7400-gisb-arb", .data = gisb_offsets_bcm7400 },
 	{ .compatible = "brcm,bcm7278-gisb-arb", .data = gisb_offsets_bcm7278 },
 	{ .compatible = "brcm,bcm7038-gisb-arb", .data = gisb_offsets_bcm7038 },
+	{ .compatible = "brcm,bcm74165-gisb-arb", .data = gisb_offsets_bcm74165 },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, brcmstb_gisb_arb_of_match);
 
 static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
 {
 	struct device_node *dn = pdev->dev.of_node;
 	struct brcmstb_gisb_arb_device *gdev;
 	const struct of_device_id *of_id;
-	struct resource *r;
 	int err, timeout_irq, tea_irq, bp_irq;
 	unsigned int num_masters, j = 0;
 	int i, first, last;
 
-	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	timeout_irq = platform_get_irq(pdev, 0);
 	tea_irq = platform_get_irq(pdev, 1);
 	bp_irq = platform_get_irq(pdev, 2);
@@ -418,7 +429,7 @@ static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
 	mutex_init(&gdev->lock);
 	INIT_LIST_HEAD(&gdev->next);
 
-	gdev->base = devm_ioremap_resource(&pdev->dev, r);
+	gdev->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
 	if (IS_ERR(gdev->base))
 		return PTR_ERR(gdev->base);
 
@@ -476,10 +487,6 @@ static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
 		}
 	}
 
-	err = sysfs_create_group(&pdev->dev.kobj, &gisb_arb_sysfs_attr_group);
-	if (err)
-		return err;
-
 	platform_set_drvdata(pdev, gdev);
 
 	list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);
@@ -536,6 +543,7 @@ static struct platform_driver brcmstb_gisb_arb_driver = {
 		.name	= "brcm-gisb-arb",
 		.of_match_table = brcmstb_gisb_arb_of_match,
 		.pm	= &brcmstb_gisb_arb_pm_ops,
+		.dev_groups = gisb_arb_sysfs_groups,
 	},
 };
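The brcmstb_gisb change above replaces a manual sysfs_create_group() call with ATTRIBUTE_GROUPS() plus the driver-core .dev_groups hook. A minimal sketch of that pattern, with hypothetical "foo"/"demo" names (only the macro and struct field come from the kernel API):

	/*
	 * ATTRIBUTE_GROUPS(foo) expands foo_attrs into a NULL-terminated
	 * foo_groups[] array. The driver core registers/unregisters the
	 * groups around probe()/remove(), so the attributes exist before
	 * the uevent is sent and no manual cleanup can be forgotten.
	 */
	static DEVICE_ATTR_RO(demo);		/* assumes a demo_show() exists */

	static struct attribute *foo_attrs[] = {
		&dev_attr_demo.attr,
		NULL,
	};
	ATTRIBUTE_GROUPS(foo);			/* emits foo_groups */

	static struct platform_driver foo_driver = {
		.driver = {
			.name	    = "foo",
			.dev_groups = foo_groups,	/* core handles add/remove */
		},
	};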
diff --git a/drivers/bus/bt1-apb.c b/drivers/bus/bt1-apb.c
index 63b1b4a76671..7463124b6dd9 100644
--- a/drivers/bus/bt1-apb.c
+++ b/drivers/bus/bt1-apb.c
@@ -22,7 +22,6 @@
 #include <linux/clk.h>
 #include <linux/reset.h>
 #include <linux/time64.h>
-#include <linux/clk.h>
 #include <linux/sysfs.h>
 
 #define APB_EHB_ISR			0x00
@@ -186,34 +185,13 @@ static int bt1_apb_request_rst(struct bt1_apb *apb)
 	return ret;
 }
 
-static void bt1_apb_disable_clk(void *data)
-{
-	struct bt1_apb *apb = data;
-
-	clk_disable_unprepare(apb->pclk);
-}
-
 static int bt1_apb_request_clk(struct bt1_apb *apb)
 {
-	int ret;
-
-	apb->pclk = devm_clk_get(apb->dev, "pclk");
+	apb->pclk = devm_clk_get_enabled(apb->dev, "pclk");
 	if (IS_ERR(apb->pclk))
 		return dev_err_probe(apb->dev, PTR_ERR(apb->pclk),
 				     "Couldn't get APB clock descriptor\n");
 
-	ret = clk_prepare_enable(apb->pclk);
-	if (ret) {
-		dev_err(apb->dev, "Couldn't enable the APB clock\n");
-		return ret;
-	}
-
-	ret = devm_add_action_or_reset(apb->dev, bt1_apb_disable_clk, apb);
-	if (ret) {
-		dev_err(apb->dev, "Can't add APB EHB clocks disable action\n");
-		return ret;
-	}
-
 	apb->rate = clk_get_rate(apb->pclk);
 	if (!apb->rate) {
 		dev_err(apb->dev, "Invalid clock rate\n");
@@ -416,4 +394,3 @@ module_platform_driver(bt1_apb_driver);
 MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
 MODULE_DESCRIPTION("Baikal-T1 APB-bus driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/bt1-axi.c b/drivers/bus/bt1-axi.c
index 70e49a6e5374..a5254c73bf43 100644
--- a/drivers/bus/bt1-axi.c
+++ b/drivers/bus/bt1-axi.c
@@ -146,33 +146,14 @@ static int bt1_axi_request_rst(struct bt1_axi *axi)
 	return ret;
 }
 
-static void bt1_axi_disable_clk(void *data)
-{
-	struct bt1_axi *axi = data;
-
-	clk_disable_unprepare(axi->aclk);
-}
-
 static int bt1_axi_request_clk(struct bt1_axi *axi)
 {
-	int ret;
-
-	axi->aclk = devm_clk_get(axi->dev, "aclk");
+	axi->aclk = devm_clk_get_enabled(axi->dev, "aclk");
 	if (IS_ERR(axi->aclk))
 		return dev_err_probe(axi->dev, PTR_ERR(axi->aclk),
 				     "Couldn't get AXI Interconnect clock\n");
 
-	ret = clk_prepare_enable(axi->aclk);
-	if (ret) {
-		dev_err(axi->dev, "Couldn't enable the AXI clock\n");
-		return ret;
-	}
-
-	ret = devm_add_action_or_reset(axi->dev, bt1_axi_disable_clk, axi);
-	if (ret)
-		dev_err(axi->dev, "Can't add AXI clock disable action\n");
-
-	return ret;
+	return 0;
 }
 
 static int bt1_axi_request_irq(struct bt1_axi *axi)
@@ -309,4 +290,3 @@ module_platform_driver(bt1_axi_driver);
 MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
 MODULE_DESCRIPTION("Baikal-T1 AXI-bus driver");
-MODULE_LICENSE("GPL v2");
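Both bt1-apb and bt1-axi conversions above rely on devm_clk_get_enabled(), which folds devm_clk_get() + clk_prepare_enable() + a devm disable action into a single call. A minimal sketch of the resulting probe path (names are hypothetical):

	#include <linux/clk.h>
	#include <linux/platform_device.h>

	static int demo_probe(struct platform_device *pdev)
	{
		struct clk *pclk;

		/* clock is prepared+enabled now and automatically
		 * disabled/unprepared/put when the driver detaches */
		pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
		if (IS_ERR(pclk))
			return dev_err_probe(&pdev->dev, PTR_ERR(pclk),
					     "Couldn't get bus clock\n");

		/* no clk_disable_unprepare() in remove() or error paths */
		return 0;
	}

This is exactly what made bt1_apb_disable_clk() and bt1_axi_disable_clk() redundant.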
diff --git a/drivers/bus/fsl-mc/Kconfig b/drivers/bus/fsl-mc/Kconfig
index b1fd55901c50..9492342e7d13 100644
--- a/drivers/bus/fsl-mc/Kconfig
+++ b/drivers/bus/fsl-mc/Kconfig
@@ -8,7 +8,7 @@ config FSL_MC_BUS
 	bool "QorIQ DPAA2 fsl-mc bus driver"
 	depends on OF && (ARCH_LAYERSCAPE || (COMPILE_TEST && (ARM || ARM64 || X86_LOCAL_APIC || PPC)))
-	select GENERIC_MSI_IRQ_DOMAIN
+	select GENERIC_MSI_IRQ
 	help
 	  Driver to enable the bus infrastructure for the QorIQ DPAA2
 	  architecture.  The fsl-mc bus driver handles discovery of
diff --git a/drivers/bus/fsl-mc/dpmcp.c b/drivers/bus/fsl-mc/dpmcp.c
index 5fbd0dbde24a..7816c0a728ef 100644
--- a/drivers/bus/fsl-mc/dpmcp.c
+++ b/drivers/bus/fsl-mc/dpmcp.c
@@ -75,25 +75,3 @@ int dpmcp_close(struct fsl_mc_io *mc_io,
 	/* send command to mc*/
 	return mc_send_command(mc_io, &cmd);
 }
-
-/**
- * dpmcp_reset() - Reset the DPMCP, returns the object to initial state.
- * @mc_io:	Pointer to MC portal's I/O object
- * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPMCP object
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpmcp_reset(struct fsl_mc_io *mc_io,
-		u32 cmd_flags,
-		u16 token)
-{
-	struct fsl_mc_command cmd = { 0 };
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPMCP_CMDID_RESET,
-					  cmd_flags, token);
-
-	/* send command to mc*/
-	return mc_send_command(mc_io, &cmd);
-}
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index 5e70f9775a0e..c63a7e688db6 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -11,7 +11,6 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
-#include <linux/msi.h>
 #include <linux/fsl/mc.h>
 
 #include "fsl-mc-private.h"
@@ -23,8 +22,8 @@ struct fsl_mc_child_objs {
 	struct fsl_mc_obj_desc *child_array;
 };
 
-static bool fsl_mc_device_match(struct fsl_mc_device *mc_dev,
-				struct fsl_mc_obj_desc *obj_desc)
+static bool fsl_mc_device_match(const struct fsl_mc_device *mc_dev,
+				const struct fsl_mc_obj_desc *obj_desc)
 {
 	return mc_dev->obj_desc.id == obj_desc->id &&
 	       strcmp(mc_dev->obj_desc.type, obj_desc->type) == 0;
@@ -46,6 +45,9 @@ static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data)
 	struct fsl_mc_child_objs *objs;
 	struct fsl_mc_device *mc_dev;
 
+	if (!dev_is_fsl_mc(dev))
+		return 0;
+
 	mc_dev = to_fsl_mc_device(dev);
 	objs = data;
 
@@ -65,6 +67,9 @@ static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data)
 
 static int __fsl_mc_device_remove(struct device *dev, void *data)
 {
+	if (!dev_is_fsl_mc(dev))
+		return 0;
+
 	fsl_mc_device_remove(to_fsl_mc_device(dev));
 	return 0;
 }
@@ -107,9 +112,9 @@ void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev,
 }
 EXPORT_SYMBOL_GPL(dprc_remove_devices);
 
-static int __fsl_mc_device_match(struct device *dev, void *data)
+static int __fsl_mc_device_match(struct device *dev, const void *data)
 {
-	struct fsl_mc_obj_desc *obj_desc = data;
+	const struct fsl_mc_obj_desc *obj_desc = data;
 	struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
 
 	return fsl_mc_device_match(mc_dev, obj_desc);
@@ -801,8 +806,6 @@ int dprc_cleanup(struct fsl_mc_device *mc_dev)
 		dev_set_msi_domain(&mc_dev->dev, NULL);
 	}
 
-	fsl_mc_cleanup_all_resource_pools(mc_dev);
-
 	/* if this step fails we cannot go further with cleanup as there is no way of
 	 * communicating with the firmware
 	 */
@@ -836,15 +839,14 @@ EXPORT_SYMBOL_GPL(dprc_cleanup);
  * It tears down the interrupts that were configured for the DPRC device.
  * It destroys the interrupt pool associated with this MC bus.
  */
-static int dprc_remove(struct fsl_mc_device *mc_dev)
+static void dprc_remove(struct fsl_mc_device *mc_dev)
 {
 	struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
 
-	if (!is_fsl_mc_bus_dprc(mc_dev))
-		return -EINVAL;
-
-	if (!mc_bus->irq_resources)
-		return -EINVAL;
+	if (!mc_bus->irq_resources) {
+		dev_err(&mc_dev->dev, "No irq resources, so unbinding the device failed\n");
+		return;
+	}
 
 	if (dev_get_msi_domain(&mc_dev->dev))
 		dprc_teardown_irq(mc_dev);
@@ -854,7 +856,6 @@ static int dprc_remove(struct fsl_mc_device *mc_dev)
 	dprc_cleanup(mc_dev);
 
 	dev_info(&mc_dev->dev, "DPRC device unbound from driver");
-	return 0;
 }
 
 static const struct fsl_mc_device_id match_id_table[] = {
diff --git a/drivers/bus/fsl-mc/dprc.c b/drivers/bus/fsl-mc/dprc.c
index d129338b8bc0..38d40c09b719 100644
--- a/drivers/bus/fsl-mc/dprc.c
+++ b/drivers/bus/fsl-mc/dprc.c
@@ -450,10 +450,8 @@ int dprc_get_obj(struct fsl_mc_io *mc_io,
 	obj_desc->ver_major = le16_to_cpu(rsp_params->version_major);
 	obj_desc->ver_minor = le16_to_cpu(rsp_params->version_minor);
 	obj_desc->flags = le16_to_cpu(rsp_params->flags);
-	strncpy(obj_desc->type, rsp_params->type, 16);
-	obj_desc->type[15] = '\0';
-	strncpy(obj_desc->label, rsp_params->label, 16);
-	obj_desc->label[15] = '\0';
+	strscpy_pad(obj_desc->type, rsp_params->type, 16);
+	strscpy_pad(obj_desc->label, rsp_params->label, 16);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(dprc_get_obj);
@@ -491,8 +489,7 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
 	cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
 	cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
 	cmd_params->obj_id = cpu_to_le32(obj_id);
-	strncpy(cmd_params->obj_type, obj_type, 16);
-	cmd_params->obj_type[15] = '\0';
+	strscpy(cmd_params->obj_type, obj_type);
 
 	/* send command to mc*/
 	return mc_send_command(mc_io, &cmd);
@@ -564,8 +561,7 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
 	cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params;
 	cmd_params->obj_id = cpu_to_le32(obj_id);
 	cmd_params->region_index = region_index;
-	strncpy(cmd_params->obj_type, obj_type, 16);
-	cmd_params->obj_type[15] = '\0';
+	strscpy(cmd_params->obj_type, obj_type);
 
 	/* send command to mc*/
 	err = mc_send_command(mc_io, &cmd);
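The dprc.c hunks above swap strncpy() for strscpy()/strscpy_pad(). A short sketch of why (the struct and function names here are illustrative, not from the commit):

	/*
	 * strncpy() neither guarantees NUL termination nor reports
	 * truncation, hence the old manual buf[15] = '\0' lines.
	 * strscpy() always terminates and returns -E2BIG on truncation;
	 * strscpy_pad() additionally zero-fills the tail, which matters
	 * for fixed-size fields handed to firmware.
	 */
	#include <linux/string.h>

	struct obj_name {		/* hypothetical 16-byte firmware field */
		char type[16];
	};

	static int demo_set_type(struct obj_name *obj, const char *src)
	{
		ssize_t len = strscpy_pad(obj->type, src, sizeof(obj->type));

		if (len == -E2BIG)	/* src didn't fit; obj->type still valid */
			return -EINVAL;
		return 0;
	}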
diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
index dced427ca8ba..d2ea59471323 100644
--- a/drivers/bus/fsl-mc/fsl-mc-allocator.c
+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
@@ -103,26 +103,32 @@ static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
 	struct fsl_mc_resource *resource;
 	int error = -EINVAL;
 
-	if (!fsl_mc_is_allocatable(mc_dev))
-		goto out;
+	mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
+	mc_bus = to_fsl_mc_bus(mc_bus_dev);
 
 	resource = mc_dev->resource;
-	if (!resource || resource->data != mc_dev)
+	if (!resource || resource->data != mc_dev) {
+		dev_err(&mc_bus_dev->dev, "resource mismatch\n");
 		goto out;
+	}
 
-	mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
-	mc_bus = to_fsl_mc_bus(mc_bus_dev);
 	res_pool = resource->parent_pool;
-	if (res_pool != &mc_bus->resource_pools[resource->type])
+	if (res_pool != &mc_bus->resource_pools[resource->type]) {
+		dev_err(&mc_bus_dev->dev, "pool mismatch\n");
 		goto out;
+	}
 
 	mutex_lock(&res_pool->mutex);
-	if (res_pool->max_count <= 0)
+	if (res_pool->max_count <= 0) {
+		dev_err(&mc_bus_dev->dev, "max_count underflow\n");
 		goto out_unlock;
+	}
 
 	if (res_pool->free_count <= 0 ||
-	    res_pool->free_count > res_pool->max_count)
+	    res_pool->free_count > res_pool->max_count) {
+		dev_err(&mc_bus_dev->dev, "free_count mismatch\n");
 		goto out_unlock;
+	}
 
 	/*
	 * If the device is currently allocated, its resource is not
@@ -549,30 +555,6 @@ void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
 	}
 }
 
-static void fsl_mc_cleanup_resource_pool(struct fsl_mc_device *mc_bus_dev,
-					 enum fsl_mc_pool_type pool_type)
-{
-	struct fsl_mc_resource *resource;
-	struct fsl_mc_resource *next;
-	struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
-	struct fsl_mc_resource_pool *res_pool =
-					&mc_bus->resource_pools[pool_type];
-	int free_count = 0;
-
-	list_for_each_entry_safe(resource, next, &res_pool->free_list, node) {
-		free_count++;
-		devm_kfree(&mc_bus_dev->dev, resource);
-	}
-}
-
-void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
-{
-	int pool_type;
-
-	for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++)
-		fsl_mc_cleanup_resource_pool(mc_bus_dev, pool_type);
-}
-
 /*
  * fsl_mc_allocator_probe - callback invoked when an allocatable device is
  * being added to the system
@@ -609,22 +591,18 @@ static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev)
 * fsl_mc_allocator_remove - callback invoked when an allocatable device is
 * being removed from the system
 */
-static int fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev)
+static void fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev)
 {
 	int error;
 
-	if (!fsl_mc_is_allocatable(mc_dev))
-		return -EINVAL;
-
 	if (mc_dev->resource) {
 		error = fsl_mc_resource_pool_remove_device(mc_dev);
 		if (error < 0)
-			return error;
+			return;
 	}
 
 	dev_dbg(&mc_dev->dev,
 		"Allocatable fsl-mc device unbound from fsl_mc_allocator driver");
-	return 0;
 }
 
 static const struct fsl_mc_device_id match_id_table[] = {
@@ -657,8 +635,3 @@ int __init fsl_mc_allocator_driver_init(void)
 {
 	return fsl_mc_driver_register(&fsl_mc_allocator_driver);
 }
-
-void fsl_mc_allocator_driver_exit(void)
-{
-	fsl_mc_driver_unregister(&fsl_mc_allocator_driver);
-}
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 6143dbf31f31..25845c04e562 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -14,10 +14,10 @@
 #include <linux/of_device.h>
 #include <linux/of_address.h>
 #include <linux/ioport.h>
+#include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/limits.h>
 #include <linux/bitops.h>
-#include <linux/msi.h>
 #include <linux/dma-mapping.h>
 #include <linux/acpi.h>
 #include <linux/iommu.h>
@@ -80,11 +80,11 @@ static phys_addr_t mc_portal_base_phys_addr;
 *
 * Returns 1 on success, 0 otherwise.
 */
-static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
+static int fsl_mc_bus_match(struct device *dev, const struct device_driver *drv)
 {
 	const struct fsl_mc_device_id *id;
 	struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
-	struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
+	const struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
 	bool found = false;
 
 	/* When driver_override is set, only bind to the matching driver */
@@ -125,9 +125,9 @@ out:
 /*
  * fsl_mc_bus_uevent - callback invoked when a device is added
  */
-static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int fsl_mc_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
 {
-	struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+	const struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
 
 	if (add_uevent_var(env, "MODALIAS=fsl-mc:v%08Xd%s",
 			   mc_dev->obj_desc.vendor,
@@ -139,9 +139,9 @@ static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
 
 static int fsl_mc_dma_configure(struct device *dev)
 {
+	const struct device_driver *drv = READ_ONCE(dev->driver);
 	struct device *dma_dev = dev;
 	struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
-	struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
 	u32 input_id = mc_dev->icid;
 	int ret;
 
@@ -153,7 +153,8 @@ static int fsl_mc_dma_configure(struct device *dev)
 	else
 		ret = acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id);
 
-	if (!ret && !mc_drv->driver_managed_dma) {
+	/* @drv may not be valid when we're called from the IOMMU layer */
+	if (!ret && drv && !to_fsl_mc_driver(drv)->driver_managed_dma) {
 		ret = iommu_device_use_default_domain(dev);
 		if (ret)
 			arch_teardown_dma_ops(dev);
@@ -175,8 +176,8 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 {
 	struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
 
-	return sprintf(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
-		       mc_dev->obj_desc.type);
+	return sysfs_emit(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
+			  mc_dev->obj_desc.type);
 }
 static DEVICE_ATTR_RO(modalias);
 
@@ -202,7 +203,7 @@ static ssize_t driver_override_show(struct device *dev,
 {
 	struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
 
-	return snprintf(buf, PAGE_SIZE, "%s\n", mc_dev->driver_override);
+	return sysfs_emit(buf, "%s\n", mc_dev->driver_override);
 }
 static DEVICE_ATTR_RW(driver_override);
 
@@ -232,7 +233,7 @@ exit:
 	return 0;
 }
 
-static ssize_t rescan_store(struct bus_type *bus,
+static ssize_t rescan_store(const struct bus_type *bus,
 			    const char *buf, size_t count)
 {
 	unsigned long val;
@@ -285,7 +286,7 @@ exit:
 	return 0;
 }
 
-static ssize_t autorescan_store(struct bus_type *bus,
+static ssize_t autorescan_store(const struct bus_type *bus,
 				const char *buf, size_t count)
 {
 	bus_for_each_dev(bus, NULL, (void *)buf, fsl_mc_bus_set_autorescan);
@@ -293,7 +294,7 @@ static ssize_t autorescan_store(struct bus_type *bus,
 	return count;
 }
 
-static ssize_t autorescan_show(struct bus_type *bus, char *buf)
+static ssize_t autorescan_show(const struct bus_type *bus, char *buf)
 {
 	bus_for_each_dev(bus, NULL, (void *)buf, fsl_mc_bus_get_autorescan);
 	return strlen(buf);
@@ -309,7 +310,7 @@ static struct attribute *fsl_mc_bus_attrs[] = {
 
 ATTRIBUTE_GROUPS(fsl_mc_bus);
 
-struct bus_type fsl_mc_bus_type = {
+const struct bus_type fsl_mc_bus_type = {
 	.name = "fsl-mc",
 	.match = fsl_mc_bus_match,
 	.uevent = fsl_mc_bus_uevent,
@@ -320,90 +321,90 @@ struct bus_type fsl_mc_bus_type = {
 };
 EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
 
-struct device_type fsl_mc_bus_dprc_type = {
+const struct device_type fsl_mc_bus_dprc_type = {
 	.name = "fsl_mc_bus_dprc"
 };
 EXPORT_SYMBOL_GPL(fsl_mc_bus_dprc_type);
 
-struct device_type fsl_mc_bus_dpni_type = {
+const struct device_type fsl_mc_bus_dpni_type = {
 	.name = "fsl_mc_bus_dpni"
 };
 EXPORT_SYMBOL_GPL(fsl_mc_bus_dpni_type);
 
-struct device_type fsl_mc_bus_dpio_type = {
+const struct device_type fsl_mc_bus_dpio_type = {
 	.name = "fsl_mc_bus_dpio"
 };
 EXPORT_SYMBOL_GPL(fsl_mc_bus_dpio_type);
 
-struct device_type fsl_mc_bus_dpsw_type = {
+const struct device_type fsl_mc_bus_dpsw_type = {
 	.name = "fsl_mc_bus_dpsw"
 };
 EXPORT_SYMBOL_GPL(fsl_mc_bus_dpsw_type);
 
-struct device_type fsl_mc_bus_dpbp_type = {
+const struct device_type fsl_mc_bus_dpbp_type = {
 	.name = "fsl_mc_bus_dpbp"
 };
 EXPORT_SYMBOL_GPL(fsl_mc_bus_dpbp_type);
 
-struct device_type fsl_mc_bus_dpcon_type = {
+const struct device_type fsl_mc_bus_dpcon_type = {
 	.name = "fsl_mc_bus_dpcon"
 };
 EXPORT_SYMBOL_GPL(fsl_mc_bus_dpcon_type);
 
-struct device_type fsl_mc_bus_dpmcp_type = {
+const struct device_type fsl_mc_bus_dpmcp_type = {
 	.name = "fsl_mc_bus_dpmcp"
 };
 EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmcp_type);
 
-struct device_type fsl_mc_bus_dpmac_type = {
+const struct device_type fsl_mc_bus_dpmac_type = {
 	.name = "fsl_mc_bus_dpmac"
 };
 EXPORT_SYMBOL_GPL(fsl_mc_bus_dpmac_type);
 
-struct device_type fsl_mc_bus_dprtc_type = {
+const struct device_type fsl_mc_bus_dprtc_type = {
 	.name = "fsl_mc_bus_dprtc"
 };
 EXPORT_SYMBOL_GPL(fsl_mc_bus_dprtc_type);
 
-struct device_type fsl_mc_bus_dpseci_type = {
+const struct device_type fsl_mc_bus_dpseci_type = {
 	.name = "fsl_mc_bus_dpseci"
 };
 EXPORT_SYMBOL_GPL(fsl_mc_bus_dpseci_type);
 
-struct device_type fsl_mc_bus_dpdmux_type = {
+const struct device_type fsl_mc_bus_dpdmux_type = {
 	.name = "fsl_mc_bus_dpdmux"
 };
 EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmux_type);
 
-struct device_type fsl_mc_bus_dpdcei_type = {
+const struct device_type fsl_mc_bus_dpdcei_type = {
 	.name = "fsl_mc_bus_dpdcei"
 };
 EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdcei_type);
 
-struct device_type fsl_mc_bus_dpaiop_type = {
+const struct device_type fsl_mc_bus_dpaiop_type = {
 	.name = "fsl_mc_bus_dpaiop"
 };
 EXPORT_SYMBOL_GPL(fsl_mc_bus_dpaiop_type);
 
-struct device_type fsl_mc_bus_dpci_type = {
+const struct device_type fsl_mc_bus_dpci_type = {
 	.name = "fsl_mc_bus_dpci"
 };
 EXPORT_SYMBOL_GPL(fsl_mc_bus_dpci_type);
 
-struct device_type fsl_mc_bus_dpdmai_type = {
+const struct device_type fsl_mc_bus_dpdmai_type = {
 	.name = "fsl_mc_bus_dpdmai"
 };
 EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmai_type);
 
-struct device_type fsl_mc_bus_dpdbg_type = {
+const struct device_type fsl_mc_bus_dpdbg_type = {
 	.name = "fsl_mc_bus_dpdbg"
 };
 EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdbg_type);
 
-static struct device_type *fsl_mc_get_device_type(const char *type)
+static const struct device_type *fsl_mc_get_device_type(const char *type)
 {
 	static const struct {
-		struct device_type *dev_type;
+		const struct device_type *dev_type;
 		const char *type;
 	} dev_types[] = {
 		{ &fsl_mc_bus_dprc_type, "dprc" },
@@ -455,13 +456,8 @@ static int fsl_mc_driver_remove(struct device *dev)
 {
 	struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);
 	struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
-	int error;
 
-	error = mc_drv->remove(mc_dev);
-	if (error < 0) {
-		dev_err(dev, "%s failed: %d\n", __func__, error);
-		return error;
-	}
+	mc_drv->remove(mc_dev);
 
 	return 0;
 }
@@ -910,8 +906,10 @@ int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
 
 error_cleanup_dev:
 	kfree(mc_dev->regions);
-	kfree(mc_bus);
-	kfree(mc_dev);
+	if (mc_bus)
+		kfree(mc_bus);
+	else
+		kfree(mc_dev);
 
 	return error;
 }
@@ -945,6 +943,7 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
 	struct fsl_mc_obj_desc endpoint_desc = {{ 0 }};
 	struct dprc_endpoint endpoint1 = {{ 0 }};
 	struct dprc_endpoint endpoint2 = {{ 0 }};
+	struct fsl_mc_bus *mc_bus;
 	int state, err;
 
 	mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
@@ -968,6 +967,8 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
 	strcpy(endpoint_desc.type, endpoint2.type);
 	endpoint_desc.id = endpoint2.id;
 	endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
+	if (endpoint)
+		return endpoint;
 
 	/*
	 * We know that the device has an endpoint because we verified by
@@ -975,17 +976,13 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
 	 * yet discovered by the fsl-mc bus, thus the lookup returned NULL.
 	 * Force a rescan of the devices in this container and retry the lookup.
 	 */
-	if (!endpoint) {
-		struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
-
-		if (mutex_trylock(&mc_bus->scan_mutex)) {
-			err = dprc_scan_objects(mc_bus_dev, true);
-			mutex_unlock(&mc_bus->scan_mutex);
-		}
-
-		if (err < 0)
-			return ERR_PTR(err);
+	mc_bus = to_fsl_mc_bus(mc_bus_dev);
+	if (mutex_trylock(&mc_bus->scan_mutex)) {
+		err = dprc_scan_objects(mc_bus_dev, true);
+		mutex_unlock(&mc_bus->scan_mutex);
 	}
+	if (err < 0)
+		return ERR_PTR(err);
 
 	endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
 	/*
@@ -1000,75 +997,18 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
 }
 EXPORT_SYMBOL_GPL(fsl_mc_get_endpoint);
 
-static int parse_mc_ranges(struct device *dev,
-			   int *paddr_cells,
-			   int *mc_addr_cells,
-			   int *mc_size_cells,
-			   const __be32 **ranges_start)
-{
-	const __be32 *prop;
-	int range_tuple_cell_count;
-	int ranges_len;
-	int tuple_len;
-	struct device_node *mc_node = dev->of_node;
-
-	*ranges_start = of_get_property(mc_node, "ranges", &ranges_len);
-	if (!(*ranges_start) || !ranges_len) {
-		dev_warn(dev,
-			 "missing or empty ranges property for device tree node '%pOFn'\n",
-			 mc_node);
-		return 0;
-	}
-
-	*paddr_cells = of_n_addr_cells(mc_node);
-
-	prop = of_get_property(mc_node, "#address-cells", NULL);
-	if (prop)
-		*mc_addr_cells = be32_to_cpup(prop);
-	else
-		*mc_addr_cells = *paddr_cells;
-
-	prop = of_get_property(mc_node, "#size-cells", NULL);
-	if (prop)
-		*mc_size_cells = be32_to_cpup(prop);
-	else
-		*mc_size_cells = of_n_size_cells(mc_node);
-
-	range_tuple_cell_count = *paddr_cells + *mc_addr_cells +
-				 *mc_size_cells;
-
-	tuple_len = range_tuple_cell_count * sizeof(__be32);
-	if (ranges_len % tuple_len != 0) {
-		dev_err(dev, "malformed ranges property '%pOFn'\n", mc_node);
-		return -EINVAL;
-	}
-
-	return ranges_len / tuple_len;
-}
-
 static int get_mc_addr_translation_ranges(struct device *dev,
 					  struct fsl_mc_addr_translation_range **ranges,
 					  u8 *num_ranges)
 {
-	int ret;
-	int paddr_cells;
-	int mc_addr_cells;
-	int mc_size_cells;
-	int i;
-	const __be32 *ranges_start;
-	const __be32 *cell;
-
-	ret = parse_mc_ranges(dev,
-			      &paddr_cells,
-			      &mc_addr_cells,
-			      &mc_size_cells,
-			      &ranges_start);
-	if (ret < 0)
-		return ret;
+	struct fsl_mc_addr_translation_range *r;
+	struct of_range_parser parser;
+	struct of_range range;
 
-	*num_ranges = ret;
-	if (!ret) {
+	of_range_parser_init(&parser, dev->of_node);
+	*num_ranges = of_range_count(&parser);
+	if (!*num_ranges) {
 		/*
		 * Missing or empty ranges property ("ranges;") for the
		 * 'fsl,qoriq-mc' node. In this case, identity mapping
@@ -1084,20 +1024,13 @@ static int get_mc_addr_translation_ranges(struct device *dev,
 	if (!(*ranges))
 		return -ENOMEM;
 
-	cell = ranges_start;
-	for (i = 0; i < *num_ranges; ++i) {
-		struct fsl_mc_addr_translation_range *range = &(*ranges)[i];
-
-		range->mc_region_type = of_read_number(cell, 1);
-		range->start_mc_offset = of_read_number(cell + 1,
-							mc_addr_cells - 1);
-		cell += mc_addr_cells;
-		range->start_phys_addr = of_read_number(cell, paddr_cells);
-		cell += paddr_cells;
-		range->end_mc_offset = range->start_mc_offset +
-				of_read_number(cell, mc_size_cells);
-
-		cell += mc_size_cells;
+	r = *ranges;
+	for_each_of_range(&parser, &range) {
+		r->mc_region_type = range.flags;
+		r->start_mc_offset = range.bus_addr;
+		r->end_mc_offset = range.bus_addr + range.size;
+		r->start_phys_addr = range.cpu_addr;
+		r++;
 	}
 
 	return 0;
@@ -1171,6 +1104,9 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
 	 * Get physical address of MC portal for the root DPRC:
 	 */
 	plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!plat_res)
+		return -EINVAL;
+
 	mc_portal_phys_addr = plat_res->start;
 	mc_portal_size = resource_size(plat_res);
 	mc_portal_base_phys_addr = mc_portal_phys_addr & ~0x3ffffff;
@@ -1236,14 +1172,11 @@ error_cleanup_mc_io:
 * fsl_mc_bus_remove - callback invoked when the root MC bus is being
 * removed
 */
-static int fsl_mc_bus_remove(struct platform_device *pdev)
+static void fsl_mc_bus_remove(struct platform_device *pdev)
 {
 	struct fsl_mc *mc = platform_get_drvdata(pdev);
 	struct fsl_mc_io *mc_io;
 
-	if (!fsl_mc_is_root_dprc(&mc->root_mc_bus_dev->dev))
-		return -EINVAL;
-
 	mc_io = mc->root_mc_bus_dev->mc_io;
 	fsl_mc_device_remove(mc->root_mc_bus_dev);
 	fsl_destroy_mc_io(mc_io);
@@ -1259,13 +1192,6 @@ static int fsl_mc_bus_remove(struct platform_device *pdev)
 		       (GCR1_P1_STOP | GCR1_P2_STOP),
 		       mc->fsl_mc_regs + FSL_MC_GCR1);
 	}
-
-	return 0;
-}
-
-static void fsl_mc_bus_shutdown(struct platform_device *pdev)
-{
-	fsl_mc_bus_remove(pdev);
 }
 
 static const struct of_device_id fsl_mc_bus_match_table[] = {
@@ -1290,7 +1216,7 @@ static struct platform_driver fsl_mc_bus_driver = {
 	},
 	.probe = fsl_mc_bus_probe,
 	.remove = fsl_mc_bus_remove,
-	.shutdown = fsl_mc_bus_shutdown,
+	.shutdown = fsl_mc_bus_remove,
 };
 
 static int fsl_mc_bus_notifier(struct notifier_block *nb,
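The get_mc_addr_translation_ranges() rewrite above (and the imx-weim change later in this commit) uses the OF range parser instead of hand-decoding #address-cells/#size-cells from the raw "ranges" property. A minimal sketch of the parser API, with a hypothetical helper:

	#include <linux/of_address.h>

	static int demo_count_ranges(struct device_node *np)
	{
		struct of_range_parser parser;
		struct of_range range;
		int n = 0;

		if (of_range_parser_init(&parser, np))
			return -EINVAL;	/* no usable "ranges" property */

		/* each iteration yields one decoded entry:
		 * range.bus_addr, range.cpu_addr, range.size, range.flags */
		for_each_of_range(&parser, &range)
			n++;

		return n;
	}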
diff --git a/drivers/bus/fsl-mc/fsl-mc-msi.c b/drivers/bus/fsl-mc/fsl-mc-msi.c
index 0cfe859a4ac4..82cd69f7884c 100644
--- a/drivers/bus/fsl-mc/fsl-mc-msi.c
+++ b/drivers/bus/fsl-mc/fsl-mc-msi.c
@@ -7,8 +7,6 @@
 *
 */
 
-#include <linux/of_device.h>
-#include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
@@ -213,21 +211,8 @@ struct irq_domain *fsl_mc_find_msi_domain(struct device *dev)
 
 int fsl_mc_msi_domain_alloc_irqs(struct device *dev, unsigned int irq_count)
 {
-	struct irq_domain *msi_domain;
-	int error;
-
-	msi_domain = dev_get_msi_domain(dev);
-	if (!msi_domain)
-		return -EINVAL;
-
-	error = msi_setup_device_data(dev);
-	if (error)
-		return error;
+	int error = msi_setup_device_data(dev);
 
-	msi_lock_descs(dev);
-	if (msi_first_desc(dev, MSI_DESC_ALL))
-		error = -EINVAL;
-	msi_unlock_descs(dev);
 	if (error)
 		return error;
 
@@ -235,7 +220,7 @@ int fsl_mc_msi_domain_alloc_irqs(struct device *dev, unsigned int irq_count)
 	 * NOTE: Calling this function will trigger the invocation of the
 	 * its_fsl_mc_msi_prepare() callback
 	 */
-	error = msi_domain_alloc_irqs(msi_domain, dev, irq_count);
+	error = msi_domain_alloc_irqs_range(dev, MSI_DEFAULT_DOMAIN, 0, irq_count - 1);
 	if (error)
 		dev_err(dev, "Failed to allocate IRQs\n");
 
@@ -244,11 +229,5 @@ int fsl_mc_msi_domain_alloc_irqs(struct device *dev, unsigned int irq_count)
 
 void fsl_mc_msi_domain_free_irqs(struct device *dev)
 {
-	struct irq_domain *msi_domain;
-
-	msi_domain = dev_get_msi_domain(dev);
-	if (!msi_domain)
-		return;
-
-	msi_domain_free_irqs(msi_domain, dev);
+	msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN);
}
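The fsl-mc-msi.c hunk above moves to the per-device MSI API: the explicit dev_get_msi_domain() lookups disappear because the irq core resolves the device's MSI domain internally. A short sketch of the shape of that API as used here (illustrative, with a hypothetical caller):

	#include <linux/msi.h>

	static int demo_enable_msis(struct device *dev, unsigned int irq_count)
	{
		int ret = msi_setup_device_data(dev);

		if (ret)
			return ret;

		/* allocate descriptors for indices 0 .. irq_count - 1
		 * within the device's default MSI domain */
		return msi_domain_alloc_irqs_range(dev, MSI_DEFAULT_DOMAIN,
						   0, irq_count - 1);
	}

	static void demo_disable_msis(struct device *dev)
	{
		msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN);
	}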
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
index b3520ea1b9f4..beed4c53533d 100644
--- a/drivers/bus/fsl-mc/fsl-mc-private.h
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -66,10 +66,6 @@ int dpmcp_close(struct fsl_mc_io *mc_io,
 		u32 cmd_flags,
 		u16 token);
 
-int dpmcp_reset(struct fsl_mc_io *mc_io,
-		u32 cmd_flags,
-		u16 token);
-
 /*
 * Data Path Resource Container (DPRC) API
 */
@@ -631,12 +627,8 @@ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
 
 int __init fsl_mc_allocator_driver_init(void);
 
-void fsl_mc_allocator_driver_exit(void);
-
 void fsl_mc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
 
-void fsl_mc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev);
-
 int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
 					  enum fsl_mc_pool_type pool_type,
 					  struct fsl_mc_resource
diff --git a/drivers/bus/fsl-mc/fsl-mc-uapi.c b/drivers/bus/fsl-mc/fsl-mc-uapi.c
index 9c4c1395fcdb..823969e4159c 100644
--- a/drivers/bus/fsl-mc/fsl-mc-uapi.c
+++ b/drivers/bus/fsl-mc/fsl-mc-uapi.c
@@ -48,6 +48,7 @@ enum fsl_mc_cmd_index {
 	DPRC_GET_POOL,
 	DPRC_GET_POOL_COUNT,
 	DPRC_GET_CONNECTION,
+	DPRC_GET_MEM,
 	DPCI_GET_LINK_STATE,
 	DPCI_GET_PEER_ATTR,
 	DPAIOP_GET_SL_VERSION,
@@ -194,6 +195,12 @@ static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = {
 		.token = true,
 		.size = 32,
 	},
+	[DPRC_GET_MEM] = {
+		.cmdid_value = 0x16D0,
+		.cmdid_mask = 0xFFF0,
+		.token = true,
+		.size = 12,
+	},
 
 	[DPCI_GET_LINK_STATE] = {
 		.cmdid_value = 0x0E10,
@@ -275,13 +282,13 @@ static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = {
 		.size = 8,
 	},
 	[DPSW_GET_TAILDROP] = {
-		.cmdid_value = 0x0A80,
+		.cmdid_value = 0x0A90,
 		.cmdid_mask = 0xFFF0,
 		.token = true,
 		.size = 14,
 	},
 	[DPSW_SET_TAILDROP] = {
-		.cmdid_value = 0x0A90,
+		.cmdid_value = 0x0A80,
 		.cmdid_mask = 0xFFF0,
 		.token = true,
 		.size = 24,
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
index 95b10a6cf307..cd8754763f40 100644
--- a/drivers/bus/fsl-mc/mc-io.c
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -214,12 +214,19 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
 	if (error < 0)
 		goto error_cleanup_resource;
 
-	dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
-						   &dpmcp_dev->dev,
-						   DL_FLAG_AUTOREMOVE_CONSUMER);
-	if (!dpmcp_dev->consumer_link) {
-		error = -EINVAL;
-		goto error_cleanup_mc_io;
+	/* If the DPRC device itself tries to allocate a portal (usually for
+	 * UAPI interaction), don't add a device link between them since the
+	 * DPMCP device is an actual child device of the DPRC and a reverse
+	 * dependency is not allowed.
+	 */
+	if (mc_dev != mc_bus_dev) {
+		dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
+							   &dpmcp_dev->dev,
+							   DL_FLAG_AUTOREMOVE_CONSUMER);
+		if (!dpmcp_dev->consumer_link) {
+			error = -EINVAL;
+			goto error_cleanup_mc_io;
+		}
 	}
 
 	*new_mc_io = mc_io;
@@ -263,23 +270,3 @@ void fsl_mc_portal_free(struct fsl_mc_io *mc_io)
 	dpmcp_dev->consumer_link = NULL;
 }
 EXPORT_SYMBOL_GPL(fsl_mc_portal_free);
-
-/**
- * fsl_mc_portal_reset - Resets the dpmcp object for a given fsl_mc_io object
- *
- * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free
- */
-int fsl_mc_portal_reset(struct fsl_mc_io *mc_io)
-{
-	int error;
-	struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
-
-	error = dpmcp_reset(mc_io, 0, dpmcp_dev->mc_handle);
-	if (error < 0) {
-		dev_err(&dpmcp_dev->dev, "dpmcp_reset() failed: %d\n", error);
-		return error;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(fsl_mc_portal_reset);
diff --git a/drivers/bus/fsl-mc/mc-sys.c b/drivers/bus/fsl-mc/mc-sys.c
index f2052cd0a051..31037f41893e 100644
--- a/drivers/bus/fsl-mc/mc-sys.c
+++ b/drivers/bus/fsl-mc/mc-sys.c
@@ -19,7 +19,7 @@
 /*
 * Timeout in milliseconds to wait for the completion of an MC command
 */
-#define MC_CMD_COMPLETION_TIMEOUT_MS	500
+#define MC_CMD_COMPLETION_TIMEOUT_MS	15000
 
 /*
 * usleep_range() min and max values used to throttle down polling
@@ -248,7 +248,7 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd)
 	enum mc_cmd_status status;
 	unsigned long irq_flags = 0;
 
-	if (in_irq() && !(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))
+	if (in_hardirq() && !(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))
 		return -EINVAL;
 
 	if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
index 5b65a48f17e7..53dd1573e323 100644
--- a/drivers/bus/hisi_lpc.c
+++ b/drivers/bus/hisi_lpc.c
@@ -13,9 +13,9 @@
 #include <linux/logic_pio.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/pci.h>
+#include <linux/platform_device.h>
 #include <linux/serial_8250.h>
 #include <linux/slab.h>
 
@@ -657,7 +657,7 @@ static int hisi_lpc_probe(struct platform_device *pdev)
 	return ret;
 }
 
-static int hisi_lpc_remove(struct platform_device *pdev)
+static void hisi_lpc_remove(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct hisi_lpc_dev *lpcdev = dev_get_drvdata(dev);
@@ -669,8 +669,6 @@ static int hisi_lpc_remove(struct platform_device *pdev)
 
 	of_platform_depopulate(dev);
 	logic_pio_unregister_range(range);
-
-	return 0;
 }
 
 static const struct of_device_id hisi_lpc_of_match[] = {
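A note on the remove() conversions running through this commit (hisi_lpc_remove(), fsl_mc_bus_remove(), dprc_remove(), fsl_mc_allocator_remove()): the driver core ignores removal errors, so remove callbacks are converted to return void and log failures instead. A minimal sketch of the platform-driver form; depending on kernel version the void callback may be wired up as .remove_new during the transition period, and all names here are hypothetical:

	#include <linux/of_platform.h>
	#include <linux/platform_device.h>

	static void demo_remove(struct platform_device *pdev)
	{
		/* nothing useful can be done with an error here; just clean up */
		of_platform_depopulate(&pdev->dev);
	}

	static struct platform_driver demo_driver = {
		.remove = demo_remove,	/* void return; errors are only logged */
		.driver = {
			.name = "demo",
		},
	};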
diff --git a/drivers/bus/imx-aipstz.c b/drivers/bus/imx-aipstz.c
new file mode 100644
index 000000000000..5fdf377f5d06
--- /dev/null
+++ b/drivers/bus/imx-aipstz.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 NXP
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#define IMX_AIPSTZ_MPR0 0x0
+
+struct imx_aipstz_config {
+	u32 mpr0;
+};
+
+struct imx_aipstz_data {
+	void __iomem *base;
+	const struct imx_aipstz_config *default_cfg;
+};
+
+static void imx_aipstz_apply_default(struct imx_aipstz_data *data)
+{
+	writel(data->default_cfg->mpr0, data->base + IMX_AIPSTZ_MPR0);
+}
+
+static const struct of_device_id imx_aipstz_match_table[] = {
+	{ .compatible = "simple-bus", },
+	{ }
+};
+
+static int imx_aipstz_probe(struct platform_device *pdev)
+{
+	struct imx_aipstz_data *data;
+
+	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return dev_err_probe(&pdev->dev, -ENOMEM,
+				     "failed to allocate data memory\n");
+
+	data->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+	if (IS_ERR(data->base))
+		return dev_err_probe(&pdev->dev, -ENOMEM,
+				     "failed to get/ioremap AC memory\n");
+
+	data->default_cfg = of_device_get_match_data(&pdev->dev);
+
+	imx_aipstz_apply_default(data);
+
+	dev_set_drvdata(&pdev->dev, data);
+
+	pm_runtime_set_active(&pdev->dev);
+	devm_pm_runtime_enable(&pdev->dev);
+
+	return of_platform_populate(pdev->dev.of_node, imx_aipstz_match_table,
+				    NULL, &pdev->dev);
+}
+
+static void imx_aipstz_remove(struct platform_device *pdev)
+{
+	of_platform_depopulate(&pdev->dev);
+}
+
+static int imx_aipstz_runtime_resume(struct device *dev)
+{
+	struct imx_aipstz_data *data = dev_get_drvdata(dev);
+
+	/* restore potentially lost configuration during domain power-off */
+	imx_aipstz_apply_default(data);
+
+	return 0;
+}
+
+static const struct dev_pm_ops imx_aipstz_pm_ops = {
+	RUNTIME_PM_OPS(NULL, imx_aipstz_runtime_resume, NULL)
+	SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+};
+
+/*
+ * following configuration is equivalent to:
+ *	masters 0-7 => trusted for R/W + use AHB's HPROT[1] to det. privilege
+ */
+static const struct imx_aipstz_config imx8mp_aipstz_default_cfg = {
+	.mpr0 = 0x77777777,
+};
+
+static const struct of_device_id imx_aipstz_of_ids[] = {
+	{ .compatible = "fsl,imx8mp-aipstz", .data = &imx8mp_aipstz_default_cfg },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, imx_aipstz_of_ids);
+
+static struct platform_driver imx_aipstz_of_driver = {
+	.probe = imx_aipstz_probe,
+	.remove = imx_aipstz_remove,
+	.driver = {
+		.name = "imx-aipstz",
+		.of_match_table = imx_aipstz_of_ids,
+		.pm = pm_ptr(&imx_aipstz_pm_ops),
+	},
+};
+module_platform_driver(imx_aipstz_of_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IMX secure AHB to IP Slave bus (AIPSTZ) bridge driver");
+MODULE_AUTHOR("Laurentiu Mihalcea <laurentiu.mihalcea@nxp.com>");
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 828c66bbaa67..83d623d97f5f 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -10,7 +10,11 @@
 #include <linux/module.h>
 #include <linux/clk.h>
 #include <linux/io.h>
-#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
 #include <linux/mfd/syscon.h>
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
 #include <linux/regmap.h>
@@ -86,8 +90,8 @@ MODULE_DEVICE_TABLE(of, weim_id_table);
 static int imx_weim_gpr_setup(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
-	struct property *prop;
-	const __be32 *p;
+	struct of_range_parser parser;
+	struct of_range range;
 	struct regmap *gpr;
 	u32 gprvals[4] = {
 		05,	/* CS0(128M) CS1(0M)  CS2(0M)  CS3(0M) */
@@ -106,17 +110,17 @@ static int imx_weim_gpr_setup(struct platform_device *pdev)
 		return 0;
 	}
 
-	of_property_for_each_u32(np, "ranges", prop, p, val) {
-		if (i % 4 == 0) {
-			cs = val;
-		} else if (i % 4 == 3 && val) {
-			val = (val / SZ_32M) | 1;
-			gprval |= val << cs * 3;
-		}
+	if (of_range_parser_init(&parser, np))
+		goto err;
+
+	for_each_of_range(&parser, &range) {
+		cs = range.bus_addr >> 32;
+		val = (range.size / SZ_32M) | 1;
+		gprval |= val << cs * 3;
 		i++;
 	}
 
-	if (i == 0 || i % 4)
+	if (i == 0)
 		goto err;
 
 	for (i = 0; i < ARRAY_SIZE(gprvals); i++) {
@@ -201,11 +205,9 @@ static int weim_timing_setup(struct device *dev, struct device_node *np,
 
 static int weim_parse_dt(struct platform_device *pdev)
 {
-	const struct of_device_id *of_id = of_match_device(weim_id_table,
-							   &pdev->dev);
-	const struct imx_weim_devtype *devtype = of_id->data;
+	const struct imx_weim_devtype *devtype = device_get_match_data(&pdev->dev);
+	int ret = 0, have_child = 0;
 	struct device_node *child;
-	int ret, have_child = 0;
 	struct weim_priv *priv;
 	void __iomem *base;
 	u32 reg;
@@ -263,7 +265,6 @@ static int weim_parse_dt(struct platform_device *pdev)
 static int weim_probe(struct platform_device *pdev)
 {
 	struct weim_priv *priv;
-	struct resource *res;
 	struct clk *clk;
 	void __iomem *base;
 	int ret;
@@ -273,8 +274,7 @@ static int weim_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	/* get the resource */
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	base = devm_ioremap_resource(&pdev->dev, res);
+	base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(base))
 		return PTR_ERR(base);
 
@@ -282,22 +282,18 @@ static int weim_probe(struct platform_device *pdev)
 	dev_set_drvdata(&pdev->dev, priv);
 
 	/* get the clock */
-	clk = devm_clk_get(&pdev->dev, NULL);
+	clk = devm_clk_get_enabled(&pdev->dev, NULL);
 	if (IS_ERR(clk))
 		return PTR_ERR(clk);
 
-	ret = clk_prepare_enable(clk);
-	if (ret)
-		return ret;
-
 	/* parse the device node */
 	ret = weim_parse_dt(pdev);
 	if (ret)
-		clk_disable_unprepare(clk);
-	else
-		dev_info(&pdev->dev, "Driver registered.\n");
+		return ret;
 
-	return ret;
+	dev_info(&pdev->dev, "Driver registered.\n");
+
+	return 0;
 }
 
 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
@@ -331,6 +327,12 @@ static int of_weim_notify(struct notifier_block *nb, unsigned long action,
 				"Failed to setup timing for '%pOF'\n", rd->dn);
 
 		if (!of_node_check_flag(rd->dn, OF_POPULATED)) {
+			/*
+			 * Clear the flag before adding the device so that
+			 * fw_devlink doesn't skip adding consumers to this
+			 * device.
+			 */
+			rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
 			if (!of_platform_device_create(rd->dn, NULL, &pdev->dev)) {
 				dev_err(&pdev->dev,
 					"Failed to create child device '%pOF'\n",
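weim_parse_dt() above drops the of_match_device() two-step in favour of device_get_match_data(), which works for both DT and ACPI firmware nodes and returns the .data pointer of the matched entry directly. A minimal sketch, with a hypothetical per-variant struct:

	#include <linux/property.h>
	#include <linux/platform_device.h>

	struct demo_devtype {
		unsigned int cs_count;	/* hypothetical per-variant parameter */
	};

	static int demo_probe(struct platform_device *pdev)
	{
		const struct demo_devtype *devtype =
			device_get_match_data(&pdev->dev);

		if (!devtype)
			return -ENODEV;	/* no match data registered */

		return 0;
	}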
diff --git a/drivers/bus/intel-ixp4xx-eb.c b/drivers/bus/intel-ixp4xx-eb.c
index a4388440aca7..320cf307db05 100644
--- a/drivers/bus/intel-ixp4xx-eb.c
+++ b/drivers/bus/intel-ixp4xx-eb.c
@@ -33,7 +33,7 @@
 #define IXP4XX_EXP_TIMING_STRIDE	0x04
 #define IXP4XX_EXP_CS_EN		BIT(31)
 #define IXP456_EXP_PAR_EN		BIT(30) /* Only on IXP45x and IXP46x */
-#define IXP4XX_EXP_T1_MASK		GENMASK(28, 27)
+#define IXP4XX_EXP_T1_MASK		GENMASK(29, 28)
 #define IXP4XX_EXP_T1_SHIFT		28
 #define IXP4XX_EXP_T2_MASK		GENMASK(27, 26)
 #define IXP4XX_EXP_T2_SHIFT		26
@@ -49,7 +49,7 @@
 #define IXP4XX_EXP_SIZE_SHIFT		10
 #define IXP4XX_EXP_CNFG_0		BIT(9) /* Always zero */
 #define IXP43X_EXP_SYNC_INTEL		BIT(8) /* Only on IXP43x */
-#define IXP43X_EXP_EXP_CHIP		BIT(7) /* Only on IXP43x */
+#define IXP43X_EXP_EXP_CHIP		BIT(7) /* Only on IXP43x, dangerous to touch on IXP42x */
 #define IXP4XX_EXP_BYTE_RD16		BIT(6)
 #define IXP4XX_EXP_HRDY_POL		BIT(5) /* Only on IXP42x */
 #define IXP4XX_EXP_MUX_EN		BIT(4)
@@ -57,8 +57,6 @@
 #define IXP4XX_EXP_WORD			BIT(2) /* Always zero */
 #define IXP4XX_EXP_WR_EN		BIT(1)
 #define IXP4XX_EXP_BYTE_EN		BIT(0)
-#define IXP42X_RESERVED			(BIT(30)|IXP4XX_EXP_CNFG_0|BIT(8)|BIT(7)|IXP4XX_EXP_WORD)
-#define IXP43X_RESERVED			(BIT(30)|IXP4XX_EXP_CNFG_0|BIT(5)|IXP4XX_EXP_WORD)
 
 #define IXP4XX_EXP_CNFG0		0x20
 #define IXP4XX_EXP_CNFG0_MEM_MAP	BIT(31)
@@ -252,10 +250,9 @@ static void ixp4xx_exp_setup_chipselect(struct ixp4xx_eb *eb,
 		cs_cfg |= val << IXP4XX_EXP_CYC_TYPE_SHIFT;
 	}
 
-	if (eb->is_42x)
-		cs_cfg &= ~IXP42X_RESERVED;
 	if (eb->is_43x) {
-		cs_cfg &= ~IXP43X_RESERVED;
+		/* Should always be zero */
+		cs_cfg &= ~IXP4XX_EXP_WORD;
 
 		/*
		 * This bit for Intel strata flash is currently unused, but let's
		 * report it if we find one.
@@ -426,4 +423,3 @@ static struct platform_driver ixp4xx_exp_driver = {
 module_platform_driver(ixp4xx_exp_driver);
 MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
 MODULE_DESCRIPTION("Intel IXP4xx external bus driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile
index 46981331b38f..354204b0ef3a 100644
--- a/drivers/bus/mhi/Makefile
+++ b/drivers/bus/mhi/Makefile
@@ -1,5 +1,5 @@
 # Host MHI stack
-obj-y += host/
+obj-$(CONFIG_MHI_BUS) += host/
 
 # Endpoint MHI stack
-obj-y += ep/
+obj-$(CONFIG_MHI_BUS_EP) += ep/
diff --git a/drivers/bus/mhi/common.h b/drivers/bus/mhi/common.h
index f794b9c8049e..dda340aaed95 100644
--- a/drivers/bus/mhi/common.h
+++ b/drivers/bus/mhi/common.h
@@ -297,30 +297,30 @@ struct mhi_ring_element {
 	__le32 dword[2];
 };
 
+#define MHI_STATE_LIST				\
+	mhi_state(RESET,	"RESET")	\
+	mhi_state(READY,	"READY")	\
+	mhi_state(M0,		"M0")		\
+	mhi_state(M1,		"M1")		\
+	mhi_state(M2,		"M2")		\
+	mhi_state(M3,		"M3")		\
+	mhi_state(M3_FAST,	"M3_FAST")	\
+	mhi_state(BHI,		"BHI")		\
+	mhi_state_end(SYS_ERR,	"SYS ERROR")
+
+#undef mhi_state
+#undef mhi_state_end
+
+#define mhi_state(a, b)		case MHI_STATE_##a: return b;
+#define mhi_state_end(a, b)	case MHI_STATE_##a: return b;
+
 static inline const char *mhi_state_str(enum mhi_state state)
 {
 	switch (state) {
-	case MHI_STATE_RESET:
-		return "RESET";
-	case MHI_STATE_READY:
-		return "READY";
-	case MHI_STATE_M0:
-		return "M0";
-	case MHI_STATE_M1:
-		return "M1";
-	case MHI_STATE_M2:
-		return "M2";
-	case MHI_STATE_M3:
-		return "M3";
-	case MHI_STATE_M3_FAST:
-		return "M3 FAST";
-	case MHI_STATE_BHI:
-		return "BHI";
-	case MHI_STATE_SYS_ERR:
-		return "SYS ERROR";
+	MHI_STATE_LIST
 	default:
 		return "Unknown state";
 	}
-};
+}
 
 #endif /* _MHI_COMMON_H */
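The MHI_STATE_LIST change above is the classic X-macro technique: the state list is written once, and redefining the per-entry macro expands the same list into different constructs so names and strings can never drift apart. A self-contained sketch with hypothetical names:

	#define DEMO_COLOR_LIST			\
		demo_color(RED,  "red")		\
		demo_color(BLUE, "blue")

	/* expansion 1: an enum */
	#define demo_color(a, b) DEMO_COLOR_##a,
	enum demo_color { DEMO_COLOR_LIST };	/* DEMO_COLOR_RED, DEMO_COLOR_BLUE */
	#undef demo_color

	/* expansion 2: case labels for the to-string helper */
	#define demo_color(a, b) case DEMO_COLOR_##a: return b;
	static inline const char *demo_color_str(enum demo_color c)
	{
		switch (c) {
		DEMO_COLOR_LIST
		default:
			return "unknown";
		}
	}
	#undef demo_color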
If both BEI and intmodt are set, and if no IRQ is pending for the + * same event ring, start the IRQ delayed work as per the value of + * intmodt. If previous IRQ is pending, then do nothing as the pending + * IRQ is enough for the host to process the current event ring element. + * + * 3. If BEI is set and intmodt is not set, no need to raise IRQ. */ - if (!bei) + if (!bei) { + if (READ_ONCE(ring->irq_pending)) + cancel_delayed_work(&ring->intmodt_work); + mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector); + } else if (ring->intmodt && !READ_ONCE(ring->irq_pending)) { + WRITE_ONCE(ring->irq_pending, true); + schedule_delayed_work(&ring->intmodt_work, msecs_to_jiffies(ring->intmodt)); + } return 0; @@ -71,45 +87,77 @@ err_unlock: static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code) { - struct mhi_ring_element event = {}; + struct mhi_ring_element *event; + int ret; + + event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL); + if (!event) + return -ENOMEM; + + event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre)); + event->dword[0] = MHI_TRE_EV_DWORD0(code, len); + event->dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT); - event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre)); - event.dword[0] = MHI_TRE_EV_DWORD0(code, len); - event.dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT); + ret = mhi_ep_send_event(mhi_cntrl, ring->er_index, event, MHI_TRE_DATA_GET_BEI(tre)); + kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event); - return mhi_ep_send_event(mhi_cntrl, ring->er_index, &event, MHI_TRE_DATA_GET_BEI(tre)); + return ret; } int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state) { - struct mhi_ring_element event = {}; + struct mhi_ring_element *event; + int ret; + + event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL); + if (!event) + return -ENOMEM; + + event->dword[0] = MHI_SC_EV_DWORD0(state); + event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT); - event.dword[0] = MHI_SC_EV_DWORD0(state); - event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT); + ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0); + kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event); - return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); + return ret; } int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env) { - struct mhi_ring_element event = {}; + struct mhi_ring_element *event; + int ret; + + event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL); + if (!event) + return -ENOMEM; - event.dword[0] = MHI_EE_EV_DWORD0(exec_env); - event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT); + event->dword[0] = MHI_EE_EV_DWORD0(exec_env); + event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT); - return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); + ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0); + kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event); + + return ret; } static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code) { struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring; - struct mhi_ring_element event = {}; + struct mhi_ring_element *event; + int ret; - event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element)); - event.dword[0] = MHI_CC_EV_DWORD0(code); - event.dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT); + event = 
kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL); + if (!event) + return -ENOMEM; + + event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element)); + event->dword[0] = MHI_CC_EV_DWORD0(code); + event->dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT); + + ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0); + kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event); - return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); + return ret; } static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el) @@ -123,6 +171,13 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele int ret; ch_id = MHI_TRE_GET_CMD_CHID(el); + + /* Check if the channel is supported by the controller */ + if ((ch_id >= mhi_cntrl->max_chan) || !mhi_cntrl->mhi_chan[ch_id].name) { + dev_dbg(dev, "Channel (%u) not supported!\n", ch_id); + return -ENODEV; + } + mhi_chan = &mhi_cntrl->mhi_chan[ch_id]; ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring; @@ -144,6 +199,8 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele goto err_unlock; } + + mhi_chan->rd_offset = ch_ring->rd_offset; } /* Set channel state to RUNNING */ @@ -196,9 +253,11 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id); /* Send channel disconnect status to client drivers */ - result.transaction_status = -ENOTCONN; - result.bytes_xferd = 0; - mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + if (mhi_chan->xfer_cb) { + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } /* Set channel state to STOP */ mhi_chan->state = MHI_CH_STATE_STOP; @@ -217,7 +276,7 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele mutex_unlock(&mhi_chan->lock); break; case MHI_PKT_TYPE_RESET_CHAN_CMD: - dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id); + dev_dbg(dev, "Received RESET command for channel (%u)\n", ch_id); if (!ch_ring->started) { dev_err(dev, "Channel (%u) not opened\n", ch_id); return -ENODEV; @@ -228,9 +287,11 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele mhi_ep_ring_reset(mhi_cntrl, ch_ring); /* Send channel disconnect status to client driver */ - result.transaction_status = -ENOTCONN; - result.bytes_xferd = 0; - mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + if (mhi_chan->xfer_cb) { + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } /* Set channel state to DISABLED */ mhi_chan->state = MHI_CH_STATE_DISABLED; @@ -269,27 +330,86 @@ bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_directio struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; - return !!(ring->rd_offset == ring->wr_offset); + return !!(mhi_chan->rd_offset == ring->wr_offset); } EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty); +static void mhi_ep_read_completion(struct mhi_ep_buf_info *buf_info) +{ + struct mhi_ep_device *mhi_dev = buf_info->mhi_dev; + struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ep_chan *mhi_chan = mhi_dev->ul_chan; + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; + struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset]; + struct mhi_result result = {}; + int ret; + + if (mhi_chan->xfer_cb) { + result.buf_addr = buf_info->cb_buf; + 
result.dir = mhi_chan->dir; + result.bytes_xferd = buf_info->size; + + mhi_chan->xfer_cb(mhi_dev, &result); + } + + /* + * The host will split the data packet into multiple TREs if it can't fit + * the packet in a single TRE. In that case, CHAIN flag will be set by the + * host for all TREs except the last one. + */ + if (buf_info->code != MHI_EV_CC_OVERFLOW) { + if (MHI_TRE_DATA_GET_CHAIN(el)) { + /* + * IEOB (Interrupt on End of Block) flag will be set by the host if + * it expects the completion event for all TREs of a TD. + */ + if (MHI_TRE_DATA_GET_IEOB(el)) { + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, + MHI_TRE_DATA_GET_LEN(el), + MHI_EV_CC_EOB); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, + "Error sending transfer compl. event\n"); + goto err_free_tre_buf; + } + } + } else { + /* + * IEOT (Interrupt on End of Transfer) flag will be set by the host + * for the last TRE of the TD and expects the completion event for + * the same. + */ + if (MHI_TRE_DATA_GET_IEOT(el)) { + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, + MHI_TRE_DATA_GET_LEN(el), + MHI_EV_CC_EOT); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, + "Error sending transfer compl. event\n"); + goto err_free_tre_buf; + } + } + } + } + + mhi_ep_ring_inc_index(ring); + +err_free_tre_buf: + kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_info->cb_buf); +} + static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, - struct mhi_ep_ring *ring, - struct mhi_result *result, - u32 len) + struct mhi_ep_ring *ring) { struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; struct device *dev = &mhi_cntrl->mhi_dev->dev; - size_t tr_len, read_offset, write_offset; + size_t tr_len, read_offset; + struct mhi_ep_buf_info buf_info = {}; + u32 len = MHI_EP_DEFAULT_MTU; struct mhi_ring_element *el; - bool tr_done = false; - void *write_addr; - u64 read_addr; - u32 buf_left; + void *buf_addr; int ret; - buf_left = len; - do { /* Don't process the transfer ring if the channel is not in RUNNING state */ if (mhi_chan->state != MHI_CH_STATE_RUNNING) { @@ -297,97 +417,62 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, return -ENODEV; } - el = &ring->ring_cache[ring->rd_offset]; + el = &ring->ring_cache[mhi_chan->rd_offset]; /* Check if there is data pending to be read from previous read operation */ if (mhi_chan->tre_bytes_left) { dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left); - tr_len = min(buf_left, mhi_chan->tre_bytes_left); + tr_len = min(len, mhi_chan->tre_bytes_left); } else { mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el); mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el); mhi_chan->tre_bytes_left = mhi_chan->tre_size; - tr_len = min(buf_left, mhi_chan->tre_size); + tr_len = min(len, mhi_chan->tre_size); } read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left; - write_offset = len - buf_left; - read_addr = mhi_chan->tre_loc + read_offset; - write_addr = result->buf_addr + write_offset; + + buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL); + if (!buf_addr) + return -ENOMEM; + + buf_info.host_addr = mhi_chan->tre_loc + read_offset; + buf_info.dev_addr = buf_addr; + buf_info.size = tr_len; + buf_info.cb = mhi_ep_read_completion; + buf_info.cb_buf = buf_addr; + buf_info.mhi_dev = mhi_chan->mhi_dev; + + if (mhi_chan->tre_bytes_left - tr_len) + buf_info.code = MHI_EV_CC_OVERFLOW; dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id); - ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len); + ret = 
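The nested CHAIN/IEOB/IEOT checks in mhi_ep_read_completion() above implement a small decision table. A hedged summary as code; the helper name and the -1 "no event" sentinel are invented for the sketch, the real function sends the completion event inline instead:

/*
 * Sketch of the per-TRE completion rule:
 *   CHAIN set,   IEOB set -> EOB event (host wants per-block completions)
 *   CHAIN unset, IEOT set -> EOT event (last TRE of the TD)
 *   otherwise             -> no completion event for this TRE
 */
static int ep_tre_completion_code(struct mhi_ring_element *el)
{
        if (MHI_TRE_DATA_GET_CHAIN(el))
                return MHI_TRE_DATA_GET_IEOB(el) ? MHI_EV_CC_EOB : -1;

        return MHI_TRE_DATA_GET_IEOT(el) ? MHI_EV_CC_EOT : -1;
}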
mhi_cntrl->read_async(mhi_cntrl, &buf_info); if (ret < 0) { dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n"); - return ret; + goto err_free_buf_addr; } - buf_left -= tr_len; mhi_chan->tre_bytes_left -= tr_len; - /* - * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been - * read completely: - * - * 1. Send completion event to the host based on the flags set in TRE. - * 2. Increment the local read offset of the transfer ring. - */ - if (!mhi_chan->tre_bytes_left) { - /* - * The host will split the data packet into multiple TREs if it can't fit - * the packet in a single TRE. In that case, CHAIN flag will be set by the - * host for all TREs except the last one. - */ - if (MHI_TRE_DATA_GET_CHAIN(el)) { - /* - * IEOB (Interrupt on End of Block) flag will be set by the host if - * it expects the completion event for all TREs of a TD. - */ - if (MHI_TRE_DATA_GET_IEOB(el)) { - ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, - MHI_TRE_DATA_GET_LEN(el), - MHI_EV_CC_EOB); - if (ret < 0) { - dev_err(&mhi_chan->mhi_dev->dev, - "Error sending transfer compl. event\n"); - return ret; - } - } - } else { - /* - * IEOT (Interrupt on End of Transfer) flag will be set by the host - * for the last TRE of the TD and expects the completion event for - * the same. - */ - if (MHI_TRE_DATA_GET_IEOT(el)) { - ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, - MHI_TRE_DATA_GET_LEN(el), - MHI_EV_CC_EOT); - if (ret < 0) { - dev_err(&mhi_chan->mhi_dev->dev, - "Error sending transfer compl. event\n"); - return ret; - } - } - - tr_done = true; - } + if (!mhi_chan->tre_bytes_left) + mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size; + /* Keep reading until the transfer ring becomes empty */ + } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE)); - mhi_ep_ring_inc_index(ring); - } + return 0; - result->bytes_xferd += tr_len; - } while (buf_left && !tr_done); +err_free_buf_addr: + kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_addr); - return 0; + return ret; } -static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el) +static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring) { struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; struct mhi_result result = {}; - u32 len = MHI_EP_DEFAULT_MTU; struct mhi_ep_chan *mhi_chan; int ret; @@ -408,30 +493,43 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_elem mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); } else { /* UL channel */ - result.buf_addr = kzalloc(len, GFP_KERNEL); - if (!result.buf_addr) - return -ENOMEM; + ret = mhi_ep_read_channel(mhi_cntrl, ring); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n"); + return ret; + } + } - do { - ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len); - if (ret < 0) { - dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n"); - kfree(result.buf_addr); - return ret; - } + return 0; +} - result.dir = mhi_chan->dir; - mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); - result.bytes_xferd = 0; - memset(result.buf_addr, 0, len); +static void mhi_ep_skb_completion(struct mhi_ep_buf_info *buf_info) +{ + struct mhi_ep_device *mhi_dev = buf_info->mhi_dev; + struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan; + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; + struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset]; + struct device *dev = &mhi_dev->dev; + struct mhi_result
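Both transfer directions now describe the copy with a struct mhi_ep_buf_info and let the controller complete it asynchronously through the cb callback. A minimal submission-side sketch using only the fields that appear in the hunks above (the wrapper function is hypothetical):

/* Sketch: queue one async host->device read; cb fires when DMA completes. */
static int ep_submit_read(struct mhi_ep_cntrl *mhi_cntrl, u64 host_addr,
                          void *buf, size_t len, struct mhi_ep_device *mhi_dev)
{
        struct mhi_ep_buf_info buf_info = {
                .host_addr = host_addr,
                .dev_addr  = buf,
                .size      = len,
                .cb        = mhi_ep_read_completion,
                .cb_buf    = buf,
                .mhi_dev   = mhi_dev,
        };

        return mhi_cntrl->read_async(mhi_cntrl, &buf_info);
}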
result = {}; + int ret; - /* Read until the ring becomes empty */ - } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE)); + if (mhi_chan->xfer_cb) { + result.buf_addr = buf_info->cb_buf; + result.dir = mhi_chan->dir; + result.bytes_xferd = buf_info->size; - kfree(result.buf_addr); + mhi_chan->xfer_cb(mhi_dev, &result); } - return 0; + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, buf_info->size, + buf_info->code); + if (ret) { + dev_err(dev, "Error sending transfer completion event\n"); + return; + } + + mhi_ep_ring_inc_index(ring); } /* TODO: Handle partially formed TDs */ @@ -440,12 +538,10 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb) struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan; struct device *dev = &mhi_chan->mhi_dev->dev; + struct mhi_ep_buf_info buf_info = {}; struct mhi_ring_element *el; u32 buf_left, read_offset; struct mhi_ep_ring *ring; - enum mhi_ev_ccs code; - void *read_addr; - u64 write_addr; size_t tr_len; u32 tre_len; int ret; @@ -469,40 +565,44 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb) goto err_exit; } - el = &ring->ring_cache[ring->rd_offset]; + el = &ring->ring_cache[mhi_chan->rd_offset]; tre_len = MHI_TRE_DATA_GET_LEN(el); tr_len = min(buf_left, tre_len); read_offset = skb->len - buf_left; - read_addr = skb->data + read_offset; - write_addr = MHI_TRE_DATA_GET_PTR(el); - dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id); - ret = mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len); - if (ret < 0) { - dev_err(dev, "Error writing to the channel\n"); - goto err_exit; - } + buf_info.dev_addr = skb->data + read_offset; + buf_info.host_addr = MHI_TRE_DATA_GET_PTR(el); + buf_info.size = tr_len; + buf_info.cb = mhi_ep_skb_completion; + buf_info.cb_buf = skb; + buf_info.mhi_dev = mhi_dev; - buf_left -= tr_len; /* * For all TREs queued by the host for DL channel, only the EOT flag will be set. * If the packet doesn't fit into a single TRE, send the OVERFLOW event to * the host so that the host can adjust the packet boundary to next TREs. Else send * the EOT event to the host indicating the packet boundary. */ - if (buf_left) - code = MHI_EV_CC_OVERFLOW; + if (buf_left - tr_len) + buf_info.code = MHI_EV_CC_OVERFLOW; else - code = MHI_EV_CC_EOT; + buf_info.code = MHI_EV_CC_EOT; - ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code); - if (ret) { - dev_err(dev, "Error sending transfer completion event\n"); + dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id); + ret = mhi_cntrl->write_async(mhi_cntrl, &buf_info); + if (ret < 0) { + dev_err(dev, "Error writing to the channel\n"); goto err_exit; } - mhi_ep_ring_inc_index(ring); + buf_left -= tr_len; + + /* + * Update the read offset cached in mhi_chan. Actual read offset + * will be updated by the completion handler. 
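For DL skbs the host only ever sets IEOT, so the endpoint signals the packet boundary itself: OVERFLOW when the skb still has bytes beyond this TRE (the host keeps reassembling), EOT otherwise. Condensed as a sketch (helper name hypothetical):

/* Sketch: completion code for one DL TRE's worth of an skb. */
static enum mhi_ev_ccs ep_dl_completion_code(u32 buf_left, size_t tr_len)
{
        return (buf_left - tr_len) ? MHI_EV_CC_OVERFLOW : MHI_EV_CC_EOT;
}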
+ */ + mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size; } while (buf_left); mutex_unlock(&mhi_chan->lock); @@ -691,7 +791,7 @@ static void mhi_ep_cmd_ring_worker(struct work_struct *work) el = &ring->ring_cache[ring->rd_offset]; ret = mhi_ep_process_cmd_ring(ring, el); - if (ret) + if (ret && ret != -ENODEV) dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset); mhi_ep_ring_inc_index(ring); @@ -703,7 +803,6 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work) struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work); struct device *dev = &mhi_cntrl->mhi_dev->dev; struct mhi_ep_ring_item *itr, *tmp; - struct mhi_ring_element *el; struct mhi_ep_ring *ring; struct mhi_ep_chan *chan; unsigned long flags; @@ -719,36 +818,47 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work) list_del(&itr->node); ring = itr->ring; + chan = &mhi_cntrl->mhi_chan[ring->ch_id]; + mutex_lock(&chan->lock); + + /* + * The ring could've stopped while we waited to grab the (chan->lock), so do + * a sanity check before going further. + */ + if (!ring->started) { + mutex_unlock(&chan->lock); + kfree(itr); + continue; + } + /* Update the write offset for the ring */ ret = mhi_ep_update_wr_offset(ring); if (ret) { dev_err(dev, "Error updating write offset for ring\n"); - kfree(itr); + mutex_unlock(&chan->lock); + kmem_cache_free(mhi_cntrl->ring_item_cache, itr); continue; } /* Sanity check to make sure there are elements in the ring */ - if (ring->rd_offset == ring->wr_offset) { - kfree(itr); + if (chan->rd_offset == ring->wr_offset) { + mutex_unlock(&chan->lock); + kmem_cache_free(mhi_cntrl->ring_item_cache, itr); continue; } - el = &ring->ring_cache[ring->rd_offset]; - chan = &mhi_cntrl->mhi_chan[ring->ch_id]; - - mutex_lock(&chan->lock); dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id); - ret = mhi_ep_process_ch_ring(ring, el); + ret = mhi_ep_process_ch_ring(ring); if (ret) { dev_err(dev, "Error processing ring for channel (%u): %d\n", ring->ch_id, ret); mutex_unlock(&chan->lock); - kfree(itr); + kmem_cache_free(mhi_cntrl->ring_item_cache, itr); continue; } mutex_unlock(&chan->lock); - kfree(itr); + kmem_cache_free(mhi_cntrl->ring_item_cache, itr); } } @@ -804,7 +914,7 @@ static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned lon u32 ch_id = ch_idx + i; ring = &mhi_cntrl->mhi_chan[ch_id].ring; - item = kzalloc(sizeof(*item), GFP_ATOMIC); + item = kmem_cache_zalloc(mhi_cntrl->ring_item_cache, GFP_ATOMIC); if (!item) return; @@ -973,44 +1083,25 @@ static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl) static void mhi_ep_reset_worker(struct work_struct *work) { struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work); - struct device *dev = &mhi_cntrl->mhi_dev->dev; enum mhi_state cur_state; - int ret; - mhi_ep_abort_transfer(mhi_cntrl); + mhi_ep_power_down(mhi_cntrl); + + mutex_lock(&mhi_cntrl->state_lock); - spin_lock_bh(&mhi_cntrl->state_lock); /* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */ mhi_ep_mmio_reset(mhi_cntrl); cur_state = mhi_cntrl->mhi_state; - spin_unlock_bh(&mhi_cntrl->state_lock); /* * Only proceed further if the reset is due to SYS_ERR. The host will * issue reset during shutdown also and we don't need to do re-init in * that case. 
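The channel ring worker above follows a lock-then-revalidate pattern: the ring can be reset between the doorbell being queued and the work running, so ring->started must be re-read under chan->lock before anything else. Schematically (a sketch of one list item's handling, not the literal code):

/* Sketch: revalidate the ring under the channel lock before using it. */
static void ep_process_ring_item(struct mhi_ep_cntrl *mhi_cntrl,
                                 struct mhi_ep_ring *ring,
                                 struct mhi_ep_chan *chan,
                                 struct mhi_ep_ring_item *itr)
{
        mutex_lock(&chan->lock);

        /* The ring may have been stopped while this work was pending. */
        if (!ring->started)
                goto out;

        /* ... update wr_offset and process elements under the lock ... */

out:
        mutex_unlock(&chan->lock);
        kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
}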
*/ - if (cur_state == MHI_STATE_SYS_ERR) { - mhi_ep_mmio_init(mhi_cntrl); + if (cur_state == MHI_STATE_SYS_ERR) + mhi_ep_power_up(mhi_cntrl); - /* Set AMSS EE before signaling ready state */ - mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); - - /* All set, notify the host that we are ready */ - ret = mhi_ep_set_ready_state(mhi_cntrl); - if (ret) - return; - - dev_dbg(dev, "READY state notification sent to the host\n"); - - ret = mhi_ep_enable(mhi_cntrl); - if (ret) { - dev_err(dev, "Failed to enable MHI endpoint: %d\n", ret); - return; - } - - enable_irq(mhi_cntrl->irq); - } + mutex_unlock(&mhi_cntrl->state_lock); } /* @@ -1045,8 +1136,9 @@ int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl) mhi_ep_mmio_mask_interrupts(mhi_cntrl); mhi_ep_mmio_init(mhi_cntrl); - mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)), - GFP_KERNEL); + mhi_cntrl->mhi_event = kcalloc(mhi_cntrl->event_rings, + sizeof(*mhi_cntrl->mhi_event), + GFP_KERNEL); if (!mhi_cntrl->mhi_event) return -ENOMEM; @@ -1089,11 +1181,11 @@ EXPORT_SYMBOL_GPL(mhi_ep_power_up); void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl) { - if (mhi_cntrl->enabled) + if (mhi_cntrl->enabled) { mhi_ep_abort_transfer(mhi_cntrl); - - kfree(mhi_cntrl->mhi_event); - disable_irq(mhi_cntrl->irq); + kfree(mhi_cntrl->mhi_event); + disable_irq(mhi_cntrl->irq); + } } EXPORT_SYMBOL_GPL(mhi_ep_power_down); @@ -1119,6 +1211,7 @@ void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl) dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n"); /* Set channel state to SUSPENDED */ + mhi_chan->state = MHI_CH_STATE_SUSPENDED; tmp &= ~CHAN_CTX_CHSTATE_MASK; tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED); mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp); @@ -1148,6 +1241,7 @@ void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl) dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n"); /* Set channel state to RUNNING */ + mhi_chan->state = MHI_CH_STATE_RUNNING; tmp &= ~CHAN_CTX_CHSTATE_MASK; tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING); mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp); @@ -1358,6 +1452,10 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq) return -EINVAL; + if (!mhi_cntrl->read_sync || !mhi_cntrl->write_sync || + !mhi_cntrl->read_async || !mhi_cntrl->write_async) + return -EINVAL; + ret = mhi_ep_chan_init(mhi_cntrl, config); if (ret) return ret; @@ -1368,21 +1466,44 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, goto err_free_ch; } + mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el", + sizeof(struct mhi_ring_element), 0, + 0, NULL); + if (!mhi_cntrl->ev_ring_el_cache) { + ret = -ENOMEM; + goto err_free_cmd; + } + + mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0, + 0, NULL); + if (!mhi_cntrl->tre_buf_cache) { + ret = -ENOMEM; + goto err_destroy_ev_ring_el_cache; + } + + mhi_cntrl->ring_item_cache = kmem_cache_create("mhi_ep_ring_item", + sizeof(struct mhi_ep_ring_item), 0, + 0, NULL); + if (!mhi_cntrl->ring_item_cache) { + ret = -ENOMEM; + goto err_destroy_tre_buf_cache; + } + INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker); INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker); INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker); INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker); - mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0); + mhi_cntrl->wq = 
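A small note on the kzalloc() to kcalloc() change above: kcalloc() checks the event_rings * sizeof(...) multiplication and returns NULL on overflow, where the open-coded multiply could silently wrap and allocate a short buffer. Illustratively:

/* Sketch: overflow-checked array allocation, as the diff now does. */
static struct mhi_event *alloc_event_array(unsigned int nr)
{
        return kcalloc(nr, sizeof(struct mhi_event), GFP_KERNEL);
}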
alloc_workqueue("mhi_ep_wq", WQ_PERCPU, 0); if (!mhi_cntrl->wq) { ret = -ENOMEM; - goto err_free_cmd; + goto err_destroy_ring_item_cache; } INIT_LIST_HEAD(&mhi_cntrl->st_transition_list); INIT_LIST_HEAD(&mhi_cntrl->ch_db_list); - spin_lock_init(&mhi_cntrl->state_lock); spin_lock_init(&mhi_cntrl->list_lock); + mutex_init(&mhi_cntrl->state_lock); mutex_init(&mhi_cntrl->event_lock); /* Set MHI version and AMSS EE before enumeration */ @@ -1435,6 +1556,12 @@ err_ida_free: ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); err_destroy_wq: destroy_workqueue(mhi_cntrl->wq); +err_destroy_ring_item_cache: + kmem_cache_destroy(mhi_cntrl->ring_item_cache); +err_destroy_ev_ring_el_cache: + kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache); +err_destroy_tre_buf_cache: + kmem_cache_destroy(mhi_cntrl->tre_buf_cache); err_free_cmd: kfree(mhi_cntrl->mhi_cmd); err_free_ch: @@ -1456,6 +1583,9 @@ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl) free_irq(mhi_cntrl->irq, mhi_cntrl); + kmem_cache_destroy(mhi_cntrl->tre_buf_cache); + kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache); + kmem_cache_destroy(mhi_cntrl->ring_item_cache); kfree(mhi_cntrl->mhi_cmd); kfree(mhi_cntrl->mhi_chan); @@ -1543,18 +1673,18 @@ void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv) } EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister); -static int mhi_ep_uevent(struct device *dev, struct kobj_uevent_env *env) +static int mhi_ep_uevent(const struct device *dev, struct kobj_uevent_env *env) { - struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + const struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT, mhi_dev->name); } -static int mhi_ep_match(struct device *dev, struct device_driver *drv) +static int mhi_ep_match(struct device *dev, const struct device_driver *drv) { struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); - struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv); + const struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv); const struct mhi_device_id *id; /* @@ -1573,7 +1703,7 @@ static int mhi_ep_match(struct device *dev, struct device_driver *drv) return 0; }; -struct bus_type mhi_ep_bus_type = { +const struct bus_type mhi_ep_bus_type = { .name = "mhi_ep", .dev_name = "mhi_ep", .match = mhi_ep_match, diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c index 115518ec76a4..26357ee68dee 100644 --- a/drivers/bus/mhi/ep/ring.c +++ b/drivers/bus/mhi/ep/ring.c @@ -30,7 +30,8 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end) { struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; struct device *dev = &mhi_cntrl->mhi_dev->dev; - size_t start, copy_size; + struct mhi_ep_buf_info buf_info = {}; + size_t start; int ret; /* Don't proceed in the case of event ring. This happens during mhi_ep_ring_start(). 
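On the error labels above: fall-through unwind labels are conventionally kept in strict reverse order of creation, so that every earlier cache is destroyed on each failure path. A compact sketch of that intended shape for the three new caches (cache names taken from the diff):

/* Sketch: create three caches, unwinding fully on any failure. */
static int ep_create_caches(struct mhi_ep_cntrl *mhi_cntrl)
{
        mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el",
                        sizeof(struct mhi_ring_element), 0, 0, NULL);
        if (!mhi_cntrl->ev_ring_el_cache)
                return -ENOMEM;

        mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf",
                        MHI_EP_DEFAULT_MTU, 0, 0, NULL);
        if (!mhi_cntrl->tre_buf_cache)
                goto err_destroy_ev_ring_el_cache;

        mhi_cntrl->ring_item_cache = kmem_cache_create("mhi_ep_ring_item",
                        sizeof(struct mhi_ep_ring_item), 0, 0, NULL);
        if (!mhi_cntrl->ring_item_cache)
                goto err_destroy_tre_buf_cache;

        return 0;

err_destroy_tre_buf_cache:
        kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
err_destroy_ev_ring_el_cache:
        kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
        return -ENOMEM;
}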
*/ @@ -43,30 +44,34 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end) start = ring->wr_offset; if (start < end) { - copy_size = (end - start) * sizeof(struct mhi_ring_element); - ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase + - (start * sizeof(struct mhi_ring_element)), - &ring->ring_cache[start], copy_size); + buf_info.size = (end - start) * sizeof(struct mhi_ring_element); + buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element)); + buf_info.dev_addr = &ring->ring_cache[start]; + + ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info); if (ret < 0) return ret; } else { - copy_size = (ring->ring_size - start) * sizeof(struct mhi_ring_element); - ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase + - (start * sizeof(struct mhi_ring_element)), - &ring->ring_cache[start], copy_size); + buf_info.size = (ring->ring_size - start) * sizeof(struct mhi_ring_element); + buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element)); + buf_info.dev_addr = &ring->ring_cache[start]; + + ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info); if (ret < 0) return ret; if (end) { - ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase, - &ring->ring_cache[0], - end * sizeof(struct mhi_ring_element)); + buf_info.host_addr = ring->rbase; + buf_info.dev_addr = &ring->ring_cache[0]; + buf_info.size = end * sizeof(struct mhi_ring_element); + + ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info); if (ret < 0) return ret; } } - dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, copy_size); + dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, buf_info.size); return 0; } @@ -102,6 +107,7 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e { struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ep_buf_info buf_info = {}; size_t old_offset = 0; u32 num_free_elem; __le64 rp; @@ -125,20 +131,23 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e } old_offset = ring->rd_offset; - mhi_ep_ring_inc_index(ring); dev_dbg(dev, "Adding an element to ring at offset (%zu)\n", ring->rd_offset); + buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el)); + buf_info.dev_addr = el; + buf_info.size = sizeof(*el); + + ret = mhi_cntrl->write_sync(mhi_cntrl, &buf_info); + if (ret) + return ret; + + mhi_ep_ring_inc_index(ring); /* Update rp in ring context */ rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase); memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64)); - ret = mhi_cntrl->write_to_host(mhi_cntrl, el, ring->rbase + (old_offset * sizeof(*el)), - sizeof(*el)); - if (ret < 0) - return ret; - - return 0; + return ret; } void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id) @@ -157,6 +166,15 @@ void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 } } +static void mhi_ep_raise_irq(struct work_struct *work) +{ + struct mhi_ep_ring *ring = container_of(work, struct mhi_ep_ring, intmodt_work.work); + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + + mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector); + WRITE_ONCE(ring->irq_pending, false); +} + int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, union mhi_ep_ring_ctx *ctx) { @@ -173,8 +191,13 @@ int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, if (ring->type == RING_TYPE_CH) ring->er_index = 
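__mhi_ep_cache_ring() above handles the circular-buffer wrap by splitting the copy into at most two read_sync() segments. The general pattern, sketched with the same buf_info fields (function name hypothetical):

/* Sketch: cache the window [start, end) of a circular ring. */
static int ep_cache_window(struct mhi_ep_cntrl *mhi_cntrl,
                           struct mhi_ep_ring *ring, size_t start, size_t end)
{
        size_t el_sz = sizeof(struct mhi_ring_element);
        struct mhi_ep_buf_info buf_info = {};
        int ret;

        if (start < end) {                      /* contiguous window */
                buf_info.host_addr = ring->rbase + start * el_sz;
                buf_info.dev_addr = &ring->ring_cache[start];
                buf_info.size = (end - start) * el_sz;
                return mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
        }

        /* Wrapped: tail segment up to the ring end first ... */
        buf_info.host_addr = ring->rbase + start * el_sz;
        buf_info.dev_addr = &ring->ring_cache[start];
        buf_info.size = (ring->ring_size - start) * el_sz;
        ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
        if (ret < 0 || !end)
                return ret;

        /* ... then the head segment from the ring base. */
        buf_info.host_addr = ring->rbase;
        buf_info.dev_addr = &ring->ring_cache[0];
        buf_info.size = end * el_sz;
        return mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
}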
le32_to_cpu(ring->ring_ctx->ch.erindex); - if (ring->type == RING_TYPE_ER) + if (ring->type == RING_TYPE_ER) { ring->irq_vector = le32_to_cpu(ring->ring_ctx->ev.msivec); + ring->intmodt = FIELD_GET(EV_CTX_INTMODT_MASK, + le32_to_cpu(ring->ring_ctx->ev.intmod)); + + INIT_DELAYED_WORK(&ring->intmodt_work, mhi_ep_raise_irq); + } /* During ring init, both rp and wp are equal */ memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rp, sizeof(u64)); @@ -201,6 +224,9 @@ int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring) { + if (ring->type == RING_TYPE_ER) + cancel_delayed_work_sync(&ring->intmodt_work); + ring->started = false; kfree(ring->ring_cache); ring->ring_cache = NULL; diff --git a/drivers/bus/mhi/ep/sm.c b/drivers/bus/mhi/ep/sm.c index 3655c19e23c7..fd200b2ac0bb 100644 --- a/drivers/bus/mhi/ep/sm.c +++ b/drivers/bus/mhi/ep/sm.c @@ -63,24 +63,23 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl) int ret; /* If MHI is in M3, resume suspended channels */ - spin_lock_bh(&mhi_cntrl->state_lock); + mutex_lock(&mhi_cntrl->state_lock); + old_state = mhi_cntrl->mhi_state; if (old_state == MHI_STATE_M3) mhi_ep_resume_channels(mhi_cntrl); ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0); - spin_unlock_bh(&mhi_cntrl->state_lock); - if (ret) { mhi_ep_handle_syserr(mhi_cntrl); - return ret; + goto err_unlock; } /* Signal host that the device moved to M0 */ ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0); if (ret) { dev_err(dev, "Failed sending M0 state change event\n"); - return ret; + goto err_unlock; } if (old_state == MHI_STATE_READY) { @@ -88,11 +87,14 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl) ret = mhi_ep_send_ee_event(mhi_cntrl, MHI_EE_AMSS); if (ret) { dev_err(dev, "Failed sending AMSS EE event\n"); - return ret; + goto err_unlock; } } - return 0; +err_unlock: + mutex_unlock(&mhi_cntrl->state_lock); + + return ret; } int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl) @@ -100,13 +102,12 @@ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl) struct device *dev = &mhi_cntrl->mhi_dev->dev; int ret; - spin_lock_bh(&mhi_cntrl->state_lock); - ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3); - spin_unlock_bh(&mhi_cntrl->state_lock); + mutex_lock(&mhi_cntrl->state_lock); + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3); if (ret) { mhi_ep_handle_syserr(mhi_cntrl); - return ret; + goto err_unlock; } mhi_ep_suspend_channels(mhi_cntrl); @@ -115,10 +116,13 @@ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl) ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3); if (ret) { dev_err(dev, "Failed sending M3 state change event\n"); - return ret; + goto err_unlock; } - return 0; +err_unlock: + mutex_unlock(&mhi_cntrl->state_lock); + + return ret; } int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl) @@ -127,22 +131,24 @@ int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl) enum mhi_state mhi_state; int ret, is_ready; - spin_lock_bh(&mhi_cntrl->state_lock); + mutex_lock(&mhi_cntrl->state_lock); + /* Ensure that the MHISTATUS is set to RESET by host */ mhi_state = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK); is_ready = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK); if (mhi_state != MHI_STATE_RESET || is_ready) { dev_err(dev, "READY state transition failed. 
MHI host not in RESET state\n"); - spin_unlock_bh(&mhi_cntrl->state_lock); - return -EIO; + ret = -EIO; + goto err_unlock; } ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY); - spin_unlock_bh(&mhi_cntrl->state_lock); - if (ret) mhi_ep_handle_syserr(mhi_cntrl); +err_unlock: + mutex_unlock(&mhi_cntrl->state_lock); + return ret; } diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c index 26d0eddb1477..205d83ac069f 100644 --- a/drivers/bus/mhi/host/boot.c +++ b/drivers/bus/mhi/host/boot.c @@ -31,8 +31,8 @@ int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, int ret; for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) { - bhi_vec->dma_addr = mhi_buf->dma_addr; - bhi_vec->size = mhi_buf->len; + bhi_vec->dma_addr = cpu_to_le64(mhi_buf->dma_addr); + bhi_vec->size = cpu_to_le64(mhi_buf->len); } dev_dbg(dev, "BHIe programming for RDDM\n"); @@ -82,9 +82,9 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl) * other cores to shutdown while we're collecting RDDM buffer. After * returning from this function, we expect the device to reset. * - * Normaly, we read/write pm_state only after grabbing the + * Normally, we read/write pm_state only after grabbing the * pm_lock, since we're in a panic, skipping it. Also there is no - * gurantee that this state change would take effect since + * guarantee that this state change would take effect since * we're setting it w/o grabbing pm_lock */ mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT; @@ -118,9 +118,7 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl) /* Hardware reset so force device to enter RDDM */ dev_dbg(dev, "Did not enter RDDM, do a host req reset\n"); - mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, - MHI_SOC_RESET_REQ_OFFSET, - MHI_SOC_RESET_REQ); + mhi_soc_reset(mhi_cntrl); udelay(delayus); } @@ -179,6 +177,36 @@ int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic) } EXPORT_SYMBOL_GPL(mhi_download_rddm_image); +static void mhi_fw_load_error_dump(struct mhi_controller *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + rwlock_t *pm_lock = &mhi_cntrl->pm_lock; + void __iomem *base = mhi_cntrl->bhi; + int ret, i; + u32 val; + struct { + char *name; + u32 offset; + } error_reg[] = { + { "ERROR_CODE", BHI_ERRCODE }, + { "ERROR_DBG1", BHI_ERRDBG1 }, + { "ERROR_DBG2", BHI_ERRDBG2 }, + { "ERROR_DBG3", BHI_ERRDBG3 }, + { NULL }, + }; + + read_lock_bh(pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + for (i = 0; error_reg[i].name; i++) { + ret = mhi_read_reg(mhi_cntrl, base, error_reg[i].offset, &val); + if (ret) + break; + dev_err(dev, "Reg: %s value: 0x%x\n", error_reg[i].name, val); + } + } + read_unlock_bh(pm_lock); +} + static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl, const struct mhi_buf *mhi_buf) { @@ -228,24 +256,13 @@ static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl, } static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl, - dma_addr_t dma_addr, - size_t size) + const struct mhi_buf *mhi_buf) { - u32 tx_status, val, session_id; - int i, ret; - void __iomem *base = mhi_cntrl->bhi; - rwlock_t *pm_lock = &mhi_cntrl->pm_lock; struct device *dev = &mhi_cntrl->mhi_dev->dev; - struct { - char *name; - u32 offset; - } error_reg[] = { - { "ERROR_CODE", BHI_ERRCODE }, - { "ERROR_DBG1", BHI_ERRDBG1 }, - { "ERROR_DBG2", BHI_ERRDBG2 }, - { "ERROR_DBG3", BHI_ERRDBG3 }, - { NULL }, - }; + rwlock_t *pm_lock = &mhi_cntrl->pm_lock; + void __iomem *base = mhi_cntrl->bhi; + u32 tx_status, session_id; 
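The sm.c hunks above replace the state spinlock with a mutex so the lock can be held across the event sends (which may now sleep on the async path); every early return accordingly becomes a goto err_unlock. The resulting shape, condensed from mhi_ep_set_m0_state()/mhi_ep_set_m3_state():

/* Sketch: a state transition held under the mutex end to end. */
static int ep_set_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state)
{
        int ret;

        mutex_lock(&mhi_cntrl->state_lock);

        ret = mhi_ep_set_mhi_state(mhi_cntrl, state);
        if (ret)
                goto err_unlock;

        /* May sleep: notifies the host of the new state. */
        ret = mhi_ep_send_state_change_event(mhi_cntrl, state);

err_unlock:
        mutex_unlock(&mhi_cntrl->state_lock);

        return ret;
}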
+ int ret; read_lock_bh(pm_lock); if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { @@ -257,11 +274,9 @@ static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl, dev_dbg(dev, "Starting image download via BHI. Session ID: %u\n", session_id); mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0); - mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH, - upper_32_bits(dma_addr)); - mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW, - lower_32_bits(dma_addr)); - mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size); + mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH, upper_32_bits(mhi_buf->dma_addr)); + mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW, lower_32_bits(mhi_buf->dma_addr)); + mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, mhi_buf->len); mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id); read_unlock_bh(pm_lock); @@ -276,18 +291,7 @@ static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl, if (tx_status == BHI_STATUS_ERROR) { dev_err(dev, "Image transfer failed\n"); - read_lock_bh(pm_lock); - if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { - for (i = 0; error_reg[i].name; i++) { - ret = mhi_read_reg(mhi_cntrl, base, - error_reg[i].offset, &val); - if (ret) - break; - dev_err(dev, "Reg: %s value: 0x%x\n", - error_reg[i].name, val); - } - } - read_unlock_bh(pm_lock); + mhi_fw_load_error_dump(mhi_cntrl); goto invalid_pm_state; } @@ -298,6 +302,16 @@ invalid_pm_state: return -EIO; } +static void mhi_free_bhi_buffer(struct mhi_controller *mhi_cntrl, + struct image_info *image_info) +{ + struct mhi_buf *mhi_buf = image_info->mhi_buf; + + dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len, mhi_buf->buf, mhi_buf->dma_addr); + kfree(image_info->mhi_buf); + kfree(image_info); +} + void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, struct image_info *image_info) { @@ -312,6 +326,45 @@ void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, kfree(image_info); } +static int mhi_alloc_bhi_buffer(struct mhi_controller *mhi_cntrl, + struct image_info **image_info, + size_t alloc_size) +{ + struct image_info *img_info; + struct mhi_buf *mhi_buf; + + img_info = kzalloc(sizeof(*img_info), GFP_KERNEL); + if (!img_info) + return -ENOMEM; + + /* Allocate memory for entry */ + img_info->mhi_buf = kzalloc(sizeof(*img_info->mhi_buf), GFP_KERNEL); + if (!img_info->mhi_buf) + goto error_alloc_mhi_buf; + + /* Allocate and populate vector table */ + mhi_buf = img_info->mhi_buf; + + mhi_buf->len = alloc_size; + mhi_buf->buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len, + &mhi_buf->dma_addr, GFP_KERNEL); + if (!mhi_buf->buf) + goto error_alloc_segment; + + img_info->bhi_vec = NULL; + img_info->entries = 1; + *image_info = img_info; + + return 0; + +error_alloc_segment: + kfree(mhi_buf); +error_alloc_mhi_buf: + kfree(img_info); + + return -ENOMEM; +} + int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, struct image_info **image_info, size_t alloc_size) @@ -359,6 +412,7 @@ error_alloc_segment: for (--i, --mhi_buf; i >= 0; i--, mhi_buf--) dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len, mhi_buf->buf, mhi_buf->dma_addr); + kfree(img_info->mhi_buf); error_alloc_mhi_buf: kfree(img_info); @@ -366,21 +420,19 @@ error_alloc_mhi_buf: return -ENOMEM; } -static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl, - const struct firmware *firmware, - struct image_info *img_info) +static void mhi_firmware_copy_bhie(struct mhi_controller *mhi_cntrl, + const u8 *buf, size_t remainder, + struct image_info *img_info) { - size_t remainder = firmware->size; size_t to_cpy; - const u8 *buf = 
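mhi_alloc_bhi_buffer() above is the single-segment counterpart of the BHIe vector table: BHI transfers one physically contiguous image, so one coherent allocation suffices. The core of it, sketched (wrapper name hypothetical):

/* Sketch: BHI needs one contiguous DMA buffer, not a vector table. */
static void *bhi_alloc_image(struct mhi_controller *mhi_cntrl, size_t size,
                             dma_addr_t *dma_addr)
{
        return dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, dma_addr,
                                  GFP_KERNEL);
}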
firmware->data; struct mhi_buf *mhi_buf = img_info->mhi_buf; struct bhi_vec_entry *bhi_vec = img_info->bhi_vec; while (remainder) { to_cpy = min(remainder, mhi_buf->len); memcpy(mhi_buf->buf, buf, to_cpy); - bhi_vec->dma_addr = mhi_buf->dma_addr; - bhi_vec->size = to_cpy; + bhi_vec->dma_addr = cpu_to_le64(mhi_buf->dma_addr); + bhi_vec->size = cpu_to_le64(to_cpy); buf += to_cpy; remainder -= to_cpy; @@ -389,15 +441,63 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl, } } +static enum mhi_fw_load_type mhi_fw_load_type_get(const struct mhi_controller *mhi_cntrl) +{ + if (mhi_cntrl->fbc_download) { + return MHI_FW_LOAD_FBC; + } else { + if (mhi_cntrl->seg_len) + return MHI_FW_LOAD_BHIE; + else + return MHI_FW_LOAD_BHI; + } +} + +static int mhi_load_image_bhi(struct mhi_controller *mhi_cntrl, const u8 *fw_data, size_t size) +{ + struct image_info *image; + int ret; + + ret = mhi_alloc_bhi_buffer(mhi_cntrl, &image, size); + if (ret) + return ret; + + /* Load the firmware into BHI vec table */ + memcpy(image->mhi_buf->buf, fw_data, size); + + ret = mhi_fw_load_bhi(mhi_cntrl, &image->mhi_buf[image->entries - 1]); + mhi_free_bhi_buffer(mhi_cntrl, image); + + return ret; +} + +static int mhi_load_image_bhie(struct mhi_controller *mhi_cntrl, const u8 *fw_data, size_t size) +{ + struct image_info *image; + int ret; + + ret = mhi_alloc_bhie_table(mhi_cntrl, &image, size); + if (ret) + return ret; + + mhi_firmware_copy_bhie(mhi_cntrl, fw_data, size, image); + + ret = mhi_fw_load_bhie(mhi_cntrl, &image->mhi_buf[image->entries - 1]); + mhi_free_bhie_table(mhi_cntrl, image); + + return ret; +} + void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) { const struct firmware *firmware = NULL; struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_fw_load_type fw_load_type; + enum mhi_pm_state new_state; const char *fw_name; - void *buf; - dma_addr_t dma_addr; - size_t size; - int i, ret; + const u8 *fw_data; + size_t size, fw_sz; + int ret; if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { dev_err(dev, "Device MHI is not in valid state\n"); @@ -410,15 +510,6 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) if (ret) dev_err(dev, "Could not capture serial number via BHI\n"); - for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) { - ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i), - &mhi_cntrl->oem_pk_hash[i]); - if (ret) { - dev_err(dev, "Could not capture OEM PK HASH via BHI\n"); - break; - } - } - /* wait for ready on pass through or any other execution environment */ if (!MHI_FW_LOAD_CAPABLE(mhi_cntrl->ee)) goto fw_load_ready_state; @@ -426,6 +517,20 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ? 
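With the two loaders factored out, the firmware handler reduces to a transport query plus dispatch. A condensed restatement of the tail of mhi_fw_load_handler() (wrapper name hypothetical; fw_data/size as in the diff):

/* Sketch: choose the PBL-stage transport and load the image. */
static int load_pbl_image(struct mhi_controller *mhi_cntrl,
                          const u8 *fw_data, size_t size)
{
        if (mhi_fw_load_type_get(mhi_cntrl) == MHI_FW_LOAD_BHIE)
                return mhi_load_image_bhie(mhi_cntrl, fw_data, size);

        return mhi_load_image_bhi(mhi_cntrl, fw_data, size);
}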
mhi_cntrl->edl_image : mhi_cntrl->fw_image; + /* check if the driver has already provided the firmware data */ + if (!fw_name && mhi_cntrl->fbc_download && + mhi_cntrl->fw_data && mhi_cntrl->fw_sz) { + if (!mhi_cntrl->sbl_size) { + dev_err(dev, "fw_data provided but no sbl_size\n"); + goto error_fw_load; + } + + size = mhi_cntrl->sbl_size; + fw_data = mhi_cntrl->fw_data; + fw_sz = mhi_cntrl->fw_sz; + goto skip_req_fw; + } + if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size || !mhi_cntrl->seg_len))) { dev_err(dev, @@ -445,27 +550,27 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) if (size > firmware->size) size = firmware->size; - buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, &dma_addr, - GFP_KERNEL); - if (!buf) { - release_firmware(firmware); - goto error_fw_load; - } + fw_data = firmware->data; + fw_sz = firmware->size; - /* Download image using BHI */ - memcpy(buf, firmware->data, size); - ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size); - dma_free_coherent(mhi_cntrl->cntrl_dev, size, buf, dma_addr); +skip_req_fw: + fw_load_type = mhi_fw_load_type_get(mhi_cntrl); + if (fw_load_type == MHI_FW_LOAD_BHIE) + ret = mhi_load_image_bhie(mhi_cntrl, fw_data, size); + else + ret = mhi_load_image_bhi(mhi_cntrl, fw_data, size); /* Error or in EDL mode, we're done */ if (ret) { - dev_err(dev, "MHI did not load image over BHI, ret: %d\n", ret); + dev_err(dev, "MHI did not load image over BHI%s, ret: %d\n", + fw_load_type == MHI_FW_LOAD_BHIE ? "e" : "", + ret); release_firmware(firmware); goto error_fw_load; } /* Wait for ready since EDL image was loaded */ - if (fw_name == mhi_cntrl->edl_image) { + if (fw_name && fw_name == mhi_cntrl->edl_image) { release_firmware(firmware); goto fw_load_ready_state; } @@ -478,16 +583,15 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) * If we're doing fbc, populate vector tables while * device transitioning into MHI READY state */ - if (mhi_cntrl->fbc_download) { - ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, - firmware->size); + if (fw_load_type == MHI_FW_LOAD_FBC) { + ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, fw_sz); if (ret) { release_firmware(firmware); goto error_fw_load; } /* Load the firmware into BHIE vec table */ - mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image); + mhi_firmware_copy_bhie(mhi_cntrl, fw_data, fw_sz, mhi_cntrl->fbc_image); } release_firmware(firmware); @@ -504,20 +608,24 @@ fw_load_ready_state: return; error_ready_state: - if (mhi_cntrl->fbc_download) { + if (mhi_cntrl->fbc_image) { mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); mhi_cntrl->fbc_image = NULL; } error_fw_load: - mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR; - wake_up_all(&mhi_cntrl->state_event); + write_lock_irq(&mhi_cntrl->pm_lock); + new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_FW_DL_ERR); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (new_state == MHI_PM_FW_DL_ERR) + wake_up_all(&mhi_cntrl->state_event); } int mhi_download_amss_image(struct mhi_controller *mhi_cntrl) { struct image_info *image_info = mhi_cntrl->fbc_image; struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_pm_state new_state; int ret; if (!image_info) @@ -528,8 +636,11 @@ int mhi_download_amss_image(struct mhi_controller *mhi_cntrl) &image_info->mhi_buf[image_info->entries - 1]); if (ret) { dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret); - mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR; - wake_up_all(&mhi_cntrl->state_event); + write_lock_irq(&mhi_cntrl->pm_lock); + new_state = 
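The error paths above stop assigning pm_state directly: transitions now go through mhi_tryset_pm_state() under pm_lock, which refuses moves the state machine does not allow, and waiters are only woken when the transition actually took effect. The pattern, sketched:

/* Sketch: guarded PM state transition on a firmware-load failure. */
static void fw_dl_error_transition(struct mhi_controller *mhi_cntrl)
{
        enum mhi_pm_state new_state;

        write_lock_irq(&mhi_cntrl->pm_lock);
        new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_FW_DL_ERR);
        write_unlock_irq(&mhi_cntrl->pm_lock);

        /* Wake waiters only if the transition was accepted. */
        if (new_state == MHI_PM_FW_DL_ERR)
                wake_up_all(&mhi_cntrl->state_event);
}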
mhi_tryset_pm_state(mhi_cntrl, MHI_PM_FW_DL_ERR); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (new_state == MHI_PM_FW_DL_ERR) + wake_up_all(&mhi_cntrl->state_event); } return ret; diff --git a/drivers/bus/mhi/host/debugfs.c b/drivers/bus/mhi/host/debugfs.c index cfec7811dfbb..39e45748a24c 100644 --- a/drivers/bus/mhi/host/debugfs.c +++ b/drivers/bus/mhi/host/debugfs.c @@ -10,6 +10,7 @@ #include <linux/list.h> #include <linux/mhi.h> #include <linux/module.h> +#include <linux/string_choices.h> #include "internal.h" static int mhi_debugfs_states_show(struct seq_file *m, void *d) @@ -22,7 +23,7 @@ static int mhi_debugfs_states_show(struct seq_file *m, void *d) mhi_is_active(mhi_cntrl) ? "Active" : "Inactive", mhi_state_str(mhi_cntrl->dev_state), TO_MHI_EXEC_STR(mhi_cntrl->ee), - mhi_cntrl->wake_set ? "true" : "false"); + str_true_false(mhi_cntrl->wake_set)); /* counters */ seq_printf(m, "M0: %u M2: %u M3: %u", mhi_cntrl->M0, mhi_cntrl->M2, diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c index bf672de35131..099be8dd1900 100644 --- a/drivers/bus/mhi/host/init.c +++ b/drivers/bus/mhi/host/init.c @@ -20,50 +20,49 @@ #include <linux/wait.h> #include "internal.h" +#define CREATE_TRACE_POINTS +#include "trace.h" + static DEFINE_IDA(mhi_controller_ida); +#undef mhi_ee +#undef mhi_ee_end + +#define mhi_ee(a, b) [MHI_EE_##a] = b, +#define mhi_ee_end(a, b) [MHI_EE_##a] = b, + const char * const mhi_ee_str[MHI_EE_MAX] = { - [MHI_EE_PBL] = "PRIMARY BOOTLOADER", - [MHI_EE_SBL] = "SECONDARY BOOTLOADER", - [MHI_EE_AMSS] = "MISSION MODE", - [MHI_EE_RDDM] = "RAMDUMP DOWNLOAD MODE", - [MHI_EE_WFW] = "WLAN FIRMWARE", - [MHI_EE_PTHRU] = "PASS THROUGH", - [MHI_EE_EDL] = "EMERGENCY DOWNLOAD", - [MHI_EE_FP] = "FLASH PROGRAMMER", - [MHI_EE_DISABLE_TRANSITION] = "DISABLE", - [MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED", + MHI_EE_LIST }; +#undef dev_st_trans +#undef dev_st_trans_end + +#define dev_st_trans(a, b) [DEV_ST_TRANSITION_##a] = b, +#define dev_st_trans_end(a, b) [DEV_ST_TRANSITION_##a] = b, + const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = { - [DEV_ST_TRANSITION_PBL] = "PBL", - [DEV_ST_TRANSITION_READY] = "READY", - [DEV_ST_TRANSITION_SBL] = "SBL", - [DEV_ST_TRANSITION_MISSION_MODE] = "MISSION MODE", - [DEV_ST_TRANSITION_FP] = "FLASH PROGRAMMER", - [DEV_ST_TRANSITION_SYS_ERR] = "SYS ERROR", - [DEV_ST_TRANSITION_DISABLE] = "DISABLE", + DEV_ST_TRANSITION_LIST }; +#undef ch_state_type +#undef ch_state_type_end + +#define ch_state_type(a, b) [MHI_CH_STATE_TYPE_##a] = b, +#define ch_state_type_end(a, b) [MHI_CH_STATE_TYPE_##a] = b, + const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = { - [MHI_CH_STATE_TYPE_RESET] = "RESET", - [MHI_CH_STATE_TYPE_STOP] = "STOP", - [MHI_CH_STATE_TYPE_START] = "START", + MHI_CH_STATE_TYPE_LIST }; +#undef mhi_pm_state +#undef mhi_pm_state_end + +#define mhi_pm_state(a, b) [MHI_PM_STATE_##a] = b, +#define mhi_pm_state_end(a, b) [MHI_PM_STATE_##a] = b, + static const char * const mhi_pm_state_str[] = { - [MHI_PM_STATE_DISABLE] = "DISABLE", - [MHI_PM_STATE_POR] = "POWER ON RESET", - [MHI_PM_STATE_M0] = "M0", - [MHI_PM_STATE_M2] = "M2", - [MHI_PM_STATE_M3_ENTER] = "M?->M3", - [MHI_PM_STATE_M3] = "M3", - [MHI_PM_STATE_M3_EXIT] = "M3->M0", - [MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error", - [MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect", - [MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process", - [MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process", - [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect", + 
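The string tables above switch to an X-macro scheme: each *_LIST macro (added to internal.h later in this diff) is expanded through per-table definitions of the entry macros, with #undef between uses so the same list can build several artifacts. A minimal self-contained illustration; every name here (EXAMPLE_LIST, example, EX_*) is invented for the sketch:

/* Sketch: the X-macro idiom behind MHI_EE_LIST / MHI_PM_STATE_LIST etc. */
enum { EX_FIRST, EX_LAST, EX_MAX };

#define EXAMPLE_LIST \
        example(FIRST, "first entry") \
        example_end(LAST, "last entry")

#define example(a, b)     [EX_##a] = b,
#define example_end(a, b) [EX_##a] = b,

static const char * const example_str[EX_MAX] = {
        EXAMPLE_LIST    /* expands to [EX_FIRST] = "first entry", ... */
};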
MHI_PM_STATE_LIST }; const char *to_mhi_pm_state_str(u32 state) @@ -97,11 +96,19 @@ static ssize_t oem_pk_hash_show(struct device *dev, { struct mhi_device *mhi_dev = to_mhi_device(dev); struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; - int i, cnt = 0; + u32 hash_segment[MHI_MAX_OEM_PK_HASH_SEGMENTS]; + int i, cnt = 0, ret; + + for (i = 0; i < MHI_MAX_OEM_PK_HASH_SEGMENTS; i++) { + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i), &hash_segment[i]); + if (ret) { + dev_err(dev, "Could not capture OEM PK HASH\n"); + return ret; + } + } - for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) - cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n", - i, mhi_cntrl->oem_pk_hash[i]); + for (i = 0; i < MHI_MAX_OEM_PK_HASH_SEGMENTS; i++) + cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n", i, hash_segment[i]); return cnt; } @@ -120,6 +127,30 @@ static ssize_t soc_reset_store(struct device *dev, } static DEVICE_ATTR_WO(soc_reset); +static ssize_t trigger_edl_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + unsigned long val; + int ret; + + ret = kstrtoul(buf, 10, &val); + if (ret < 0) + return ret; + + if (!val) + return -EINVAL; + + ret = mhi_cntrl->edl_trigger(mhi_cntrl); + if (ret) + return ret; + + return count; +} +static DEVICE_ATTR_WO(trigger_edl); + static struct attribute *mhi_dev_attrs[] = { &dev_attr_serial_number.attr, &dev_attr_oem_pk_hash.attr, @@ -145,7 +176,7 @@ static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl, return 0; } -void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl) +static void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl) { int i; struct mhi_event *mhi_event = mhi_cntrl->mhi_event; @@ -160,10 +191,9 @@ void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl) free_irq(mhi_cntrl->irq[0], mhi_cntrl); } -int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl) +static int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl) { struct mhi_event *mhi_event = mhi_cntrl->mhi_event; - struct device *dev = &mhi_cntrl->mhi_dev->dev; unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND; int i, ret; @@ -190,7 +220,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl) continue; if (mhi_event->irq >= mhi_cntrl->nr_irqs) { - dev_err(dev, "irq %d not available for event ring\n", + dev_err(mhi_cntrl->cntrl_dev, "irq %d not available for event ring\n", mhi_event->irq); ret = -EINVAL; goto error_request; @@ -201,7 +231,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl) irq_flags, "mhi", mhi_event); if (ret) { - dev_err(dev, "Error requesting irq:%d for ev:%d\n", + dev_err(mhi_cntrl->cntrl_dev, "Error requesting irq:%d for ev:%d\n", mhi_cntrl->irq[mhi_event->irq], i); goto error_request; } @@ -223,7 +253,7 @@ error_request: return ret; } -void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl) +static void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl) { int i; struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt; @@ -268,7 +298,7 @@ void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl) mhi_cntrl->mhi_ctxt = NULL; } -int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) +static int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) { struct mhi_ctxt *mhi_ctxt; struct mhi_chan_ctxt *chan_ctxt; @@ -510,10 +540,14 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl) dev_dbg(dev, "Initializing MHI registers\n"); /* Read channel db offset */ - 
ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, &val); - if (ret) { - dev_err(dev, "Unable to read CHDBOFF register\n"); - return -EIO; + ret = mhi_get_channel_doorbell_offset(mhi_cntrl, &val); + if (ret) + return ret; + + if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) { + dev_err(dev, "CHDB offset: 0x%x is out of range: 0x%zx\n", + val, mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)); + return -ERANGE; } /* Setup wake db */ @@ -532,6 +566,12 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl) return -EIO; } + if (val >= mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)) { + dev_err(dev, "ERDB offset: 0x%x is out of range: 0x%zx\n", + val, mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)); + return -ERANGE; + } + /* Setup event db address for each ev_ring */ mhi_event = mhi_cntrl->mhi_event; for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) { @@ -747,7 +787,7 @@ static int parse_ch_cfg(struct mhi_controller *mhi_cntrl, * so to avoid any memory possible allocation failures, vzalloc is * used here */ - mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan * + mhi_cntrl->mhi_chan = vcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan)); if (!mhi_cntrl->mhi_chan) return -ENOMEM; @@ -869,6 +909,7 @@ static int parse_config(struct mhi_controller *mhi_cntrl, if (!mhi_cntrl->timeout_ms) mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS; + mhi_cntrl->ready_timeout_ms = config->ready_timeout_ms; mhi_cntrl->bounce_buf = config->use_bounce_buf; mhi_cntrl->buffer_len = config->buf_len; if (!mhi_cntrl->buffer_len) @@ -894,7 +935,6 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan; struct mhi_cmd *mhi_cmd; struct mhi_device *mhi_dev; - u32 soc_info; int ret, i; if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs || @@ -969,17 +1009,6 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl, mhi_cntrl->unmap_single = mhi_unmap_single_no_bb; } - /* Read the MHI device info */ - ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, - SOC_HW_VERSION_OFFS, &soc_info); - if (ret) - goto err_destroy_wq; - - mhi_cntrl->family_number = FIELD_GET(SOC_HW_VERSION_FAM_NUM_BMSK, soc_info); - mhi_cntrl->device_number = FIELD_GET(SOC_HW_VERSION_DEV_NUM_BMSK, soc_info); - mhi_cntrl->major_version = FIELD_GET(SOC_HW_VERSION_MAJOR_VER_BMSK, soc_info); - mhi_cntrl->minor_version = FIELD_GET(SOC_HW_VERSION_MINOR_VER_BMSK, soc_info); - mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL); if (mhi_cntrl->index < 0) { ret = mhi_cntrl->index; @@ -1010,6 +1039,12 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl, if (ret) goto err_release_dev; + if (mhi_cntrl->edl_trigger) { + ret = sysfs_create_file(&mhi_dev->dev.kobj, &dev_attr_trigger_edl.attr); + if (ret) + goto err_release_dev; + } + mhi_cntrl->mhi_dev = mhi_dev; mhi_create_debugfs(mhi_cntrl); @@ -1043,6 +1078,9 @@ void mhi_unregister_controller(struct mhi_controller *mhi_cntrl) mhi_deinit_free_irq(mhi_cntrl); mhi_destroy_debugfs(mhi_cntrl); + if (mhi_cntrl->edl_trigger) + sysfs_remove_file(&mhi_dev->dev.kobj, &dev_attr_trigger_edl.attr); + destroy_workqueue(mhi_cntrl->hiprio_wq); kfree(mhi_cntrl->mhi_cmd); kfree(mhi_cntrl->mhi_event); @@ -1100,12 +1138,12 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl) if (bhi_off >= mhi_cntrl->reg_len) { dev_err(dev, "BHI offset: 0x%x is out of range: 0x%zx\n", bhi_off, mhi_cntrl->reg_len); - ret = -EINVAL; + ret = -ERANGE; goto error_reg_offset; } mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off; - if (mhi_cntrl->fbc_download || 
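On the new CHDBOFF/ERDBOFF range checks above: each doorbell is an 8-byte register pair (lower/upper 32 bits), so an offset read back from the device is only usable if the whole doorbell array fits inside the mapped region (reg_len). The check, restated as a sketch (helper name hypothetical):

/* Sketch: a device-supplied doorbell offset must leave room for nr_dbs
 * doorbells of 8 bytes each inside the mapping; mirrors the -ERANGE
 * checks above.
 */
static bool db_array_fits(size_t reg_len, u32 offset, u32 nr_dbs)
{
        return offset < reg_len - (8 * nr_dbs);
}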
mhi_cntrl->rddm_size) { + if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size || mhi_cntrl->seg_len) { ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &bhie_off); if (ret) { @@ -1117,7 +1155,7 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl) dev_err(dev, "BHIe offset: 0x%x is out of range: 0x%zx\n", bhie_off, mhi_cntrl->reg_len); - ret = -EINVAL; + ret = -ERANGE; goto error_reg_offset; } mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off; @@ -1395,18 +1433,18 @@ void mhi_driver_unregister(struct mhi_driver *mhi_drv) } EXPORT_SYMBOL_GPL(mhi_driver_unregister); -static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env) +static int mhi_uevent(const struct device *dev, struct kobj_uevent_env *env) { - struct mhi_device *mhi_dev = to_mhi_device(dev); + const struct mhi_device *mhi_dev = to_mhi_device(dev); return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT, mhi_dev->name); } -static int mhi_match(struct device *dev, struct device_driver *drv) +static int mhi_match(struct device *dev, const struct device_driver *drv) { struct mhi_device *mhi_dev = to_mhi_device(dev); - struct mhi_driver *mhi_drv = to_mhi_driver(drv); + const struct mhi_driver *mhi_drv = to_mhi_driver(drv); const struct mhi_device_id *id; /* @@ -1425,7 +1463,7 @@ static int mhi_match(struct device *dev, struct device_driver *drv) return 0; }; -struct bus_type mhi_bus_type = { +const struct bus_type mhi_bus_type = { .name = "mhi", .dev_name = "mhi", .match = mhi_match, @@ -1449,4 +1487,4 @@ postcore_initcall(mhi_init); module_exit(mhi_exit); MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("MHI Host Interface"); +MODULE_DESCRIPTION("Modem Host Interface"); diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h index 01fd10a399b6..7937bb1f742c 100644 --- a/drivers/bus/mhi/host/internal.h +++ b/drivers/bus/mhi/host/internal.h @@ -9,18 +9,12 @@ #include "../common.h" -extern struct bus_type mhi_bus_type; +extern const struct bus_type mhi_bus_type; /* Host request register */ #define MHI_SOC_RESET_REQ_OFFSET 0xb0 #define MHI_SOC_RESET_REQ BIT(0) -#define SOC_HW_VERSION_OFFS 0x224 -#define SOC_HW_VERSION_FAM_NUM_BMSK GENMASK(31, 28) -#define SOC_HW_VERSION_DEV_NUM_BMSK GENMASK(27, 16) -#define SOC_HW_VERSION_MAJOR_VER_BMSK GENMASK(15, 8) -#define SOC_HW_VERSION_MINOR_VER_BMSK GENMASK(7, 0) - struct mhi_ctxt { struct mhi_event_ctxt *er_ctxt; struct mhi_chan_ctxt *chan_ctxt; @@ -31,8 +25,15 @@ struct mhi_ctxt { }; struct bhi_vec_entry { - u64 dma_addr; - u64 size; + __le64 dma_addr; + __le64 size; +}; + +enum mhi_fw_load_type { + MHI_FW_LOAD_BHI, /* BHI only in PBL */ + MHI_FW_LOAD_BHIE, /* BHIe only in PBL */ + MHI_FW_LOAD_FBC, /* BHI in PBL followed by BHIe in SBL */ + MHI_FW_LOAD_MAX, }; enum mhi_ch_state_type { @@ -42,6 +43,11 @@ enum mhi_ch_state_type { MHI_CH_STATE_TYPE_MAX, }; +#define MHI_CH_STATE_TYPE_LIST \ + ch_state_type(RESET, "RESET") \ + ch_state_type(STOP, "STOP") \ + ch_state_type_end(START, "START") + extern const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX]; #define TO_CH_STATE_TYPE_STR(state) (((state) >= MHI_CH_STATE_TYPE_MAX) ? 
\ "INVALID_STATE" : \ @@ -50,6 +56,18 @@ extern const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX]; #define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \ mode != MHI_DB_BRST_ENABLE) +#define MHI_EE_LIST \ + mhi_ee(PBL, "PRIMARY BOOTLOADER") \ + mhi_ee(SBL, "SECONDARY BOOTLOADER") \ + mhi_ee(AMSS, "MISSION MODE") \ + mhi_ee(RDDM, "RAMDUMP DOWNLOAD MODE")\ + mhi_ee(WFW, "WLAN FIRMWARE") \ + mhi_ee(PTHRU, "PASS THROUGH") \ + mhi_ee(EDL, "EMERGENCY DOWNLOAD") \ + mhi_ee(FP, "FLASH PROGRAMMER") \ + mhi_ee(DISABLE_TRANSITION, "DISABLE") \ + mhi_ee_end(NOT_SUPPORTED, "NOT SUPPORTED") + extern const char * const mhi_ee_str[MHI_EE_MAX]; #define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \ "INVALID_EE" : mhi_ee_str[ee]) @@ -69,9 +87,20 @@ enum dev_st_transition { DEV_ST_TRANSITION_FP, DEV_ST_TRANSITION_SYS_ERR, DEV_ST_TRANSITION_DISABLE, + DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE, DEV_ST_TRANSITION_MAX, }; +#define DEV_ST_TRANSITION_LIST \ + dev_st_trans(PBL, "PBL") \ + dev_st_trans(READY, "READY") \ + dev_st_trans(SBL, "SBL") \ + dev_st_trans(MISSION_MODE, "MISSION MODE") \ + dev_st_trans(FP, "FLASH PROGRAMMER") \ + dev_st_trans(SYS_ERR, "SYS ERROR") \ + dev_st_trans(DISABLE, "DISABLE") \ + dev_st_trans_end(DISABLE_DESTROY_DEVICE, "DISABLE (DESTROY DEVICE)") + extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX]; #define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \ "INVALID_STATE" : dev_state_tran_str[state]) @@ -88,11 +117,27 @@ enum mhi_pm_state { MHI_PM_STATE_FW_DL_ERR, MHI_PM_STATE_SYS_ERR_DETECT, MHI_PM_STATE_SYS_ERR_PROCESS, + MHI_PM_STATE_SYS_ERR_FAIL, MHI_PM_STATE_SHUTDOWN_PROCESS, MHI_PM_STATE_LD_ERR_FATAL_DETECT, MHI_PM_STATE_MAX }; +#define MHI_PM_STATE_LIST \ + mhi_pm_state(DISABLE, "DISABLE") \ + mhi_pm_state(POR, "POWER ON RESET") \ + mhi_pm_state(M0, "M0") \ + mhi_pm_state(M2, "M2") \ + mhi_pm_state(M3_ENTER, "M?->M3") \ + mhi_pm_state(M3, "M3") \ + mhi_pm_state(M3_EXIT, "M3->M0") \ + mhi_pm_state(FW_DL_ERR, "Firmware Download Error") \ + mhi_pm_state(SYS_ERR_DETECT, "SYS ERROR Detect") \ + mhi_pm_state(SYS_ERR_PROCESS, "SYS ERROR Process") \ + mhi_pm_state(SYS_ERR_FAIL, "SYS ERROR Failure") \ + mhi_pm_state(SHUTDOWN_PROCESS, "SHUTDOWN Process") \ + mhi_pm_state_end(LD_ERR_FATAL_DETECT, "Linkdown or Error Fatal Detect") + #define MHI_PM_DISABLE BIT(0) #define MHI_PM_POR BIT(1) #define MHI_PM_M0 BIT(2) @@ -104,14 +149,16 @@ enum mhi_pm_state { #define MHI_PM_FW_DL_ERR BIT(7) #define MHI_PM_SYS_ERR_DETECT BIT(8) #define MHI_PM_SYS_ERR_PROCESS BIT(9) -#define MHI_PM_SHUTDOWN_PROCESS BIT(10) +#define MHI_PM_SYS_ERR_FAIL BIT(10) +#define MHI_PM_SHUTDOWN_PROCESS BIT(11) /* link not accessible */ -#define MHI_PM_LD_ERR_FATAL_DETECT BIT(11) +#define MHI_PM_LD_ERR_FATAL_DETECT BIT(12) #define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \ MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \ MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \ - MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR))) + MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS | \ + MHI_PM_FW_DL_ERR))) #define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR) #define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT) #define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & mhi_cntrl->db_access) @@ -123,13 +170,15 @@ enum mhi_pm_state { MHI_PM_IN_ERROR_STATE(pm_state)) #define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \ (MHI_PM_M3_ENTER | MHI_PM_M3)) +#define MHI_PM_FATAL_ERROR(pm_state) ((pm_state == 
MHI_PM_FW_DL_ERR) || \ + (pm_state >= MHI_PM_SYS_ERR_FAIL)) #define NR_OF_CMD_RINGS 1 #define CMD_EL_PER_RING 128 #define PRIMARY_CMD_RING 0 #define MHI_DEV_WAKE_DB 127 #define MHI_MAX_MTU 0xffff -#define MHI_RANDOM_U32_NONZERO(bmsk) (prandom_u32_max(bmsk) + 1) +#define MHI_RANDOM_U32_NONZERO(bmsk) (get_random_u32_inclusive(1, bmsk)) enum mhi_er_type { MHI_ER_TYPE_INVALID = 0x0, @@ -215,7 +264,7 @@ struct mhi_chan { /* * Important: When consuming, increment tre_ring first and when * releasing, decrement buf_ring first. If tre_ring has space, buf_ring - * is guranteed to have space so we do not need to check both rings. + * is guaranteed to have space so we do not need to check both rings. */ struct mhi_ring buf_ring; struct mhi_ring tre_ring; @@ -321,7 +370,7 @@ int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, u32 *out); int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, u32 offset, u32 mask, - u32 val, u32 delayus); + u32 val, u32 delayus, u32 timeout_ms); void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, u32 offset, u32 val); int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl, @@ -336,19 +385,12 @@ void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, /* Initialization methods */ int mhi_init_mmio(struct mhi_controller *mhi_cntrl); -int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl); -void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl); -int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl); -void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl); int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, struct image_info *img_info); void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl); /* Automatically allocate and queue inbound buffers */ #define MHI_CH_INBOUND_ALLOC_BUFS BIT(0) -int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, - struct mhi_chan *mhi_chan, unsigned int flags); - int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan); void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, @@ -363,6 +405,7 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, struct mhi_event *mhi_event, u32 event_quota); int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, struct mhi_event *mhi_event, u32 event_quota); +void mhi_uevent_notify(struct mhi_controller *mhi_cntrl, enum mhi_ee_type ee); /* ISR handlers */ irqreturn_t mhi_irq_handler(int irq_number, void *dev); diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c index df0fbfee7b78..861551274319 100644 --- a/drivers/bus/mhi/host/main.c +++ b/drivers/bus/mhi/host/main.c @@ -15,6 +15,7 @@ #include <linux/skbuff.h> #include <linux/slab.h> #include "internal.h" +#include "trace.h" int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, u32 offset, u32 *out) @@ -40,10 +41,11 @@ int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, u32 offset, - u32 mask, u32 val, u32 delayus) + u32 mask, u32 val, u32 delayus, + u32 timeout_ms) { int ret; - u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus; + u32 out, retry = (timeout_ms * 1000) / delayus; while (retry--) { ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out); @@ -268,7 +270,8 @@ static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl, static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr) { - return addr >= 
ring->iommu_base && addr < ring->iommu_base + ring->len; + return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len && + !(addr & (sizeof(struct mhi_ring_element) - 1)); } int mhi_destroy_device(struct device *dev, void *data) @@ -491,11 +494,8 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv) state = mhi_get_mhi_state(mhi_cntrl); ee = mhi_get_exec_env(mhi_cntrl); - dev_dbg(dev, "local ee: %s state: %s device ee: %s state: %s\n", - TO_MHI_EXEC_STR(mhi_cntrl->ee), - mhi_state_str(mhi_cntrl->dev_state), - TO_MHI_EXEC_STR(ee), mhi_state_str(state)); + trace_mhi_intvec_states(mhi_cntrl, ee, state); if (state == MHI_STATE_SYS_ERR) { dev_dbg(dev, "System error detected\n"); pm_state = mhi_tryset_pm_state(mhi_cntrl, @@ -503,7 +503,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv) } write_unlock_irq(&mhi_cntrl->pm_lock); - if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee) + if (pm_state != MHI_PM_SYS_ERR_DETECT) goto exit_intvec; switch (ee) { @@ -512,6 +512,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv) if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) { mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); mhi_cntrl->ee = ee; + mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee); wake_up_all(&mhi_cntrl->state_event); } break; @@ -602,7 +603,7 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl, { dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event); struct mhi_ring_element *local_rp, *ev_tre; - void *dev_rp; + void *dev_rp, *next_rp; struct mhi_buf_info *buf_info; u16 xfer_len; @@ -621,6 +622,16 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl, result.dir = mhi_chan->dir; local_rp = tre_ring->rp; + + next_rp = local_rp + 1; + if (next_rp >= tre_ring->base + tre_ring->len) + next_rp = tre_ring->base; + if (dev_rp != next_rp && !MHI_TRE_DATA_GET_CHAIN(local_rp)) { + dev_err(&mhi_cntrl->mhi_dev->dev, + "Event element points to an unexpected TRE\n"); + break; + } + while (local_rp != dev_rp) { buf_info = buf_ring->rp; /* If it's the last TRE, get length from the event */ @@ -642,6 +653,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl, mhi_del_ring_element(mhi_cntrl, tre_ring); local_rp = tre_ring->rp; + read_unlock_bh(&mhi_chan->lock); + /* notify client */ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); @@ -667,6 +680,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl, kfree(buf_info->cb_buf); } } + + read_lock_bh(&mhi_chan->lock); } break; } /* CC_EOT */ @@ -832,6 +847,8 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, while (dev_rp != local_rp) { enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp); + trace_mhi_ctrl_event(mhi_cntrl, local_rp); + switch (type) { case MHI_PKT_TYPE_BW_REQ_EVENT: { @@ -938,7 +955,6 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, if (!mhi_chan->configured) break; parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); - event_quota--; } break; default: @@ -961,7 +977,9 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, } read_lock_bh(&mhi_cntrl->pm_lock); - if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) + + /* Ring EV DB only if there is any pending element to process */ + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count) mhi_ring_er_db(mhi_event); read_unlock_bh(&mhi_cntrl->pm_lock); @@ -996,6 +1014,8 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, while (dev_rp != local_rp && event_quota > 0) { enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp); + 
trace_mhi_data_event(mhi_cntrl, local_rp); + chan = MHI_TRE_GET_EV_CHID(local_rp); WARN_ON(chan >= mhi_cntrl->max_chan); @@ -1031,7 +1051,9 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, count++; } read_lock_bh(&mhi_cntrl->pm_lock); - if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) + + /* Ring EV DB only if there is any pending element to process */ + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count) mhi_ring_er_db(mhi_event); read_unlock_bh(&mhi_cntrl->pm_lock); @@ -1119,17 +1141,15 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info, if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) return -EIO; - read_lock_irqsave(&mhi_cntrl->pm_lock, flags); - ret = mhi_is_ring_full(mhi_cntrl, tre_ring); - if (unlikely(ret)) { - ret = -EAGAIN; - goto exit_unlock; - } + if (unlikely(ret)) + return -EAGAIN; ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags); if (unlikely(ret)) - goto exit_unlock; + return ret; + + read_lock_irqsave(&mhi_cntrl->pm_lock, flags); /* Packet is queued, take a usage ref to exit M3 if necessary * for host->device buffer, balanced put is done on buffer completion @@ -1149,7 +1169,6 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info, if (dir == DMA_FROM_DEVICE) mhi_cntrl->runtime_put(mhi_cntrl); -exit_unlock: read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); return ret; @@ -1173,25 +1192,6 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir, } EXPORT_SYMBOL_GPL(mhi_queue_skb); -int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir, - struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags) -{ - struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan : - mhi_dev->dl_chan; - struct mhi_buf_info buf_info = { }; - - buf_info.p_addr = mhi_buf->dma_addr; - buf_info.cb_buf = mhi_buf; - buf_info.pre_mapped = true; - buf_info.len = len; - - if (unlikely(mhi_chan->pre_alloc)) - return -EINVAL; - - return mhi_queue(mhi_dev, &buf_info, dir, mflags); -} -EXPORT_SYMBOL_GPL(mhi_queue_dma); - int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, struct mhi_buf_info *info, enum mhi_flags flags) { @@ -1199,7 +1199,15 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, struct mhi_ring_element *mhi_tre; struct mhi_buf_info *buf_info; int eot, eob, chain, bei; - int ret; + int ret = 0; + + /* Protect accesses for reading and incrementing WP */ + write_lock_bh(&mhi_chan->lock); + + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) { + ret = -ENODEV; + goto out; + } buf_ring = &mhi_chan->buf_ring; tre_ring = &mhi_chan->tre_ring; @@ -1219,7 +1227,7 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, if (!info->pre_mapped) { ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); if (ret) - return ret; + goto out; } eob = !!(flags & MHI_EOB); @@ -1232,11 +1240,15 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len); mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain); + trace_mhi_gen_tre(mhi_cntrl, mhi_chan, mhi_tre); /* increment WP */ mhi_add_ring_element(mhi_cntrl, tre_ring); mhi_add_ring_element(mhi_cntrl, buf_ring); - return 0; +out: + write_unlock_bh(&mhi_chan->lock); + + return ret; } int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir, @@ -1324,9 +1336,7 @@ static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl, enum mhi_cmd_type cmd = MHI_CMD_NOP; 
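The mhi_gen_tre() hunk above reshapes the function around a single lock scope: mhi_chan->lock is now held across the state check and both write-pointer increments, a channel that is no longer ENABLED fails fast with -ENODEV, and every exit path (including the map_single() failure) funnels through one unlock. A condensed sketch of that shape, assuming the struct mhi_chan from internal.h (the helper name and the elided body are illustrative only, not the full function):

	static int sketch_gen_tre(struct mhi_chan *mhi_chan)
	{
		int ret = 0;

		/* Protect reading and incrementing the write pointer */
		write_lock_bh(&mhi_chan->lock);

		/* The channel may have been reset while the caller raced us */
		if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
			ret = -ENODEV;
			goto out;
		}

		/* ... map the buffer, fill the TRE, advance both ring WPs ... */

	out:
		write_unlock_bh(&mhi_chan->lock);
		return ret;
	}

Keeping the state check inside the lock is what lets mhi_update_channel_state() flip ch_state under the same write lock without racing queued writers.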
int ret; - dev_dbg(dev, "%d: Updating channel state to: %s\n", mhi_chan->chan, - TO_CH_STATE_TYPE_STR(to_state)); - + trace_mhi_channel_command_start(mhi_cntrl, mhi_chan, to_state, TPS("Updating")); switch (to_state) { case MHI_CH_STATE_TYPE_RESET: write_lock_irq(&mhi_chan->lock); @@ -1393,9 +1403,7 @@ static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl, write_unlock_irq(&mhi_chan->lock); } - dev_dbg(dev, "%d: Channel state change to %s successful\n", - mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); - + trace_mhi_channel_command_end(mhi_cntrl, mhi_chan, to_state, TPS("Updated")); exit_channel_update: mhi_cntrl->runtime_put(mhi_cntrl); mhi_device_put(mhi_cntrl->mhi_dev); @@ -1438,7 +1446,7 @@ exit_unprepare_channel: mutex_unlock(&mhi_chan->mutex); } -int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, +static int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, unsigned int flags) { int ret = 0; @@ -1680,17 +1688,18 @@ void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev) } EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer); -int mhi_poll(struct mhi_device *mhi_dev, u32 budget) +int mhi_get_channel_doorbell_offset(struct mhi_controller *mhi_cntrl, u32 *chdb_offset) { - struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; - struct mhi_chan *mhi_chan = mhi_dev->dl_chan; - struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + void __iomem *base = mhi_cntrl->regs; int ret; - spin_lock_bh(&mhi_event->lock); - ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget); - spin_unlock_bh(&mhi_event->lock); + ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, chdb_offset); + if (ret) { + dev_err(dev, "Unable to read CHDBOFF register\n"); + return -EIO; + } - return ret; + return 0; } -EXPORT_SYMBOL_GPL(mhi_poll); +EXPORT_SYMBOL_GPL(mhi_get_channel_doorbell_offset); diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c index caa4ce28cf9e..e3bc737313a2 100644 --- a/drivers/bus/mhi/host/pci_generic.c +++ b/drivers/bus/mhi/host/pci_generic.c @@ -8,7 +8,6 @@ * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org> */ -#include <linux/aer.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/mhi.h> @@ -24,27 +23,45 @@ #define HEALTH_CHECK_PERIOD (HZ * 2) +/* PCI VID definitions */ +#define PCI_VENDOR_ID_THALES 0x1269 +#define PCI_VENDOR_ID_QUECTEL 0x1eac +#define PCI_VENDOR_ID_NETPRISMA 0x203e + +#define MHI_EDL_DB 91 +#define MHI_EDL_COOKIE 0xEDEDEDED + /** * struct mhi_pci_dev_info - MHI PCI device specific information * @config: MHI controller configuration + * @vf_config: MHI controller configuration for virtual functions (optional) * @name: name of the PCI module * @fw: firmware path (if any) * @edl: emergency download mode firmware path (if any) + * @edl_trigger: capable of triggering EDL mode in the device (if supported) * @bar_num: PCI base address register to use for MHI MMIO register space * @dma_data_width: DMA transfer word size (32 or 64 bits) + * @vf_dma_data_width: DMA transfer word size for VFs (optional) * @mru_default: default MRU size for MBIM network packets * @sideband_wake: Devices using dedicated sideband GPIO for wakeup instead * of inband wake support (such as sdx24) + * @no_m3: M3 not supported + * @reset_on_remove: Set true for devices that require a SoC reset during driver removal */ struct mhi_pci_dev_info { const struct mhi_controller_config *config; + const struct mhi_controller_config *vf_config; const char 
*name; const char *fw; const char *edl; + bool edl_trigger; unsigned int bar_num; unsigned int dma_data_width; + unsigned int vf_dma_data_width; unsigned int mru_default; bool sideband_wake; + bool no_m3; + bool reset_on_remove; }; #define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \ @@ -209,6 +226,19 @@ struct mhi_pci_dev_info { .offload_channel = false, \ } +#define MHI_EVENT_CONFIG_SW_DATA(ev_ring, el_count) \ + { \ + .num_elements = el_count, \ + .irq_moderation_ms = 0, \ + .irq = (ev_ring) + 1, \ + .priority = 1, \ + .mode = MHI_DB_BRST_DISABLE, \ + .data_type = MHI_ER_DATA, \ + .hardware_event = false, \ + .client_managed = false, \ + .offload_channel = false, \ + } + #define MHI_EVENT_CONFIG_HW_DATA(ev_ring, el_count, ch_num) \ { \ .num_elements = el_count, \ @@ -223,6 +253,74 @@ struct mhi_pci_dev_info { .channel = ch_num, \ } +static const struct mhi_channel_config mhi_qcom_qdu100_channels[] = { + MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 2), + MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 2), + MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 128, 1), + MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 128, 1), + MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 3), + MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 3), + MHI_CHANNEL_CONFIG_UL(9, "QDSS", 64, 3), + MHI_CHANNEL_CONFIG_UL(14, "NMEA", 32, 4), + MHI_CHANNEL_CONFIG_DL(15, "NMEA", 32, 4), + MHI_CHANNEL_CONFIG_UL(16, "CSM_CTRL", 32, 4), + MHI_CHANNEL_CONFIG_DL(17, "CSM_CTRL", 32, 4), + MHI_CHANNEL_CONFIG_UL(40, "MHI_PHC", 32, 4), + MHI_CHANNEL_CONFIG_DL(41, "MHI_PHC", 32, 4), + MHI_CHANNEL_CONFIG_UL(46, "IP_SW0", 256, 5), + MHI_CHANNEL_CONFIG_DL(47, "IP_SW0", 256, 5), +}; + +static struct mhi_event_config mhi_qcom_qdu100_events[] = { + /* first ring is control+data ring */ + MHI_EVENT_CONFIG_CTRL(0, 64), + /* SAHARA dedicated event ring */ + MHI_EVENT_CONFIG_SW_DATA(1, 256), + /* Software channels dedicated event ring */ + MHI_EVENT_CONFIG_SW_DATA(2, 64), + MHI_EVENT_CONFIG_SW_DATA(3, 256), + MHI_EVENT_CONFIG_SW_DATA(4, 256), + /* Software IP channels dedicated event ring */ + MHI_EVENT_CONFIG_SW_DATA(5, 512), + MHI_EVENT_CONFIG_SW_DATA(6, 512), + MHI_EVENT_CONFIG_SW_DATA(7, 512), +}; + +static const struct mhi_controller_config mhi_qcom_qdu100_config = { + .max_channels = 128, + .timeout_ms = 120000, + .num_channels = ARRAY_SIZE(mhi_qcom_qdu100_channels), + .ch_cfg = mhi_qcom_qdu100_channels, + .num_events = ARRAY_SIZE(mhi_qcom_qdu100_events), + .event_cfg = mhi_qcom_qdu100_events, +}; + +static const struct mhi_pci_dev_info mhi_qcom_qdu100_info = { + .name = "qcom-qdu100", + .fw = "qcom/qdu100/xbl_s.melf", + .edl_trigger = true, + .config = &mhi_qcom_qdu100_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .vf_dma_data_width = 40, + .sideband_wake = false, + .no_m3 = true, + .reset_on_remove = true, +}; + +static const struct mhi_channel_config mhi_qcom_sa8775p_channels[] = { + MHI_CHANNEL_CONFIG_UL(46, "IP_SW0", 2048, 1), + MHI_CHANNEL_CONFIG_DL(47, "IP_SW0", 2048, 2), +}; + +static struct mhi_event_config mhi_qcom_sa8775p_events[] = { + /* first ring is control+data ring */ + MHI_EVENT_CONFIG_CTRL(0, 64), + /* Software channels dedicated event ring */ + MHI_EVENT_CONFIG_SW_DATA(1, 64), + MHI_EVENT_CONFIG_SW_DATA(2, 64), +}; + static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = { MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1), MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1), @@ -234,8 +332,10 @@ static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = { MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0), 
MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0), MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0), - MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2), - MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 3), + MHI_CHANNEL_CONFIG_UL(46, "IP_SW0", 64, 2), + MHI_CHANNEL_CONFIG_DL(47, "IP_SW0", 64, 3), + MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 4), + MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 5), }; static struct mhi_event_config modem_qcom_v1_mhi_events[] = { @@ -243,9 +343,31 @@ static struct mhi_event_config modem_qcom_v1_mhi_events[] = { MHI_EVENT_CONFIG_CTRL(0, 64), /* DIAG dedicated event ring */ MHI_EVENT_CONFIG_DATA(1, 128), + /* Software channels dedicated event ring */ + MHI_EVENT_CONFIG_SW_DATA(2, 64), + MHI_EVENT_CONFIG_SW_DATA(3, 64), /* Hardware channels request dedicated hardware event rings */ - MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), - MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101) + MHI_EVENT_CONFIG_HW_DATA(4, 1024, 100), + MHI_EVENT_CONFIG_HW_DATA(5, 2048, 101) +}; + +static const struct mhi_controller_config mhi_qcom_sa8775p_config = { + .max_channels = 128, + .timeout_ms = 8000, + .num_channels = ARRAY_SIZE(mhi_qcom_sa8775p_channels), + .ch_cfg = mhi_qcom_sa8775p_channels, + .num_events = ARRAY_SIZE(mhi_qcom_sa8775p_events), + .event_cfg = mhi_qcom_sa8775p_events, +}; + +static const struct mhi_controller_config modem_qcom_v2_mhiv_config = { + .max_channels = 128, + .timeout_ms = 8000, + .ready_timeout_ms = 50000, + .num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels), + .ch_cfg = modem_qcom_v1_mhi_channels, + .num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events), + .event_cfg = modem_qcom_v1_mhi_events, }; static const struct mhi_controller_config modem_qcom_v1_mhiv_config = { @@ -257,10 +379,32 @@ static const struct mhi_controller_config modem_qcom_v1_mhiv_config = { .event_cfg = modem_qcom_v1_mhi_events, }; +static const struct mhi_pci_dev_info mhi_qcom_sa8775p_info = { + .name = "qcom-sa8775p", + .edl_trigger = false, + .config = &mhi_qcom_sa8775p_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = false, +}; + +static const struct mhi_pci_dev_info mhi_qcom_sdx75_info = { + .name = "qcom-sdx75m", + .fw = "qcom/sdx75m/xbl.elf", + .edl = "qcom/sdx75m/edl.mbn", + .edl_trigger = true, + .config = &modem_qcom_v2_mhiv_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .sideband_wake = false, +}; + static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = { .name = "qcom-sdx65m", .fw = "qcom/sdx65m/xbl.elf", .edl = "qcom/sdx65m/edl.mbn", + .edl_trigger = true, .config = &modem_qcom_v1_mhiv_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, @@ -271,6 +415,7 @@ static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = { .name = "qcom-sdx55m", .fw = "qcom/sdx55m/sbl1.mbn", .edl = "qcom/sdx55m/edl.mbn", + .edl_trigger = true, .config = &modem_qcom_v1_mhiv_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, @@ -331,6 +476,16 @@ static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = { .sideband_wake = true, }; +static const struct mhi_pci_dev_info mhi_quectel_rm5xx_info = { + .name = "quectel-rm5xx", + .edl = "qcom/prog_firehose_sdx6x.elf", + .config = &modem_quectel_em1xx_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = true, +}; + static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = { MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0), MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0), @@ -340,6 +495,25 
@@ static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = { MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0), MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0), MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0), + MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0), + MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2), + MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3), +}; + +static const struct mhi_channel_config mhi_foxconn_sdx61_channels[] = { + MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0), + MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0), + MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1), + MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1), + MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0), + MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0), + MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0), + MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0), + MHI_CHANNEL_CONFIG_UL(50, "NMEA", 32, 0), + MHI_CHANNEL_CONFIG_DL(51, "NMEA", 32, 0), MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2), MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3), }; @@ -360,10 +534,29 @@ static const struct mhi_controller_config modem_foxconn_sdx55_config = { .event_cfg = mhi_foxconn_sdx55_events, }; +static const struct mhi_controller_config modem_foxconn_sdx61_config = { + .max_channels = 128, + .timeout_ms = 20000, + .num_channels = ARRAY_SIZE(mhi_foxconn_sdx61_channels), + .ch_cfg = mhi_foxconn_sdx61_channels, + .num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events), + .event_cfg = mhi_foxconn_sdx55_events, +}; + +static const struct mhi_controller_config modem_foxconn_sdx72_config = { + .max_channels = 128, + .timeout_ms = 20000, + .ready_timeout_ms = 50000, + .num_channels = ARRAY_SIZE(mhi_foxconn_sdx55_channels), + .ch_cfg = mhi_foxconn_sdx55_channels, + .num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events), + .event_cfg = mhi_foxconn_sdx55_events, +}; + static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = { .name = "foxconn-sdx55", - .fw = "qcom/sdx55m/sbl1.mbn", - .edl = "qcom/sdx55m/edl.mbn", + .edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn", + .edl_trigger = true, .config = &modem_foxconn_sdx55_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, @@ -371,8 +564,10 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = { .sideband_wake = false, }; -static const struct mhi_pci_dev_info mhi_foxconn_sdx65_info = { - .name = "foxconn-sdx65", +static const struct mhi_pci_dev_info mhi_foxconn_t99w175_info = { + .name = "foxconn-t99w175", + .edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn", + .edl_trigger = true, .config = &modem_foxconn_sdx55_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, @@ -380,6 +575,105 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx65_info = { .sideband_wake = false, }; +static const struct mhi_pci_dev_info mhi_foxconn_dw5930e_info = { + .name = "foxconn-dw5930e", + .edl = "qcom/sdx55m/foxconn/prog_firehose_sdx55.mbn", + .edl_trigger = true, + .config = &modem_foxconn_sdx55_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = false, +}; + +static const struct mhi_pci_dev_info mhi_foxconn_t99w368_info = { + .name = "foxconn-t99w368", + .edl = "qcom/sdx65m/foxconn/prog_firehose_lite.elf", + .edl_trigger = true, + .config = &modem_foxconn_sdx55_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = false, +}; + +static const 
struct mhi_pci_dev_info mhi_foxconn_t99w373_info = { + .name = "foxconn-t99w373", + .edl = "qcom/sdx65m/foxconn/prog_firehose_lite.elf", + .edl_trigger = true, + .config = &modem_foxconn_sdx55_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = false, +}; + +static const struct mhi_pci_dev_info mhi_foxconn_t99w510_info = { + .name = "foxconn-t99w510", + .edl = "qcom/sdx24m/foxconn/prog_firehose_sdx24.mbn", + .edl_trigger = true, + .config = &modem_foxconn_sdx55_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = false, +}; + +static const struct mhi_pci_dev_info mhi_foxconn_dw5932e_info = { + .name = "foxconn-dw5932e", + .edl = "qcom/sdx65m/foxconn/prog_firehose_lite.elf", + .edl_trigger = true, + .config = &modem_foxconn_sdx55_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = false, +}; + +static const struct mhi_pci_dev_info mhi_foxconn_t99w640_info = { + .name = "foxconn-t99w640", + .edl = "qcom/sdx72m/foxconn/edl.mbn", + .edl_trigger = true, + .config = &modem_foxconn_sdx72_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = false, +}; + +static const struct mhi_pci_dev_info mhi_foxconn_dw5934e_info = { + .name = "foxconn-dw5934e", + .edl = "qcom/sdx72m/foxconn/edl.mbn", + .edl_trigger = true, + .config = &modem_foxconn_sdx72_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = false, +}; + +static const struct mhi_pci_dev_info mhi_foxconn_t99w696_info = { + .name = "foxconn-t99w696", + .edl = "qcom/sdx61/foxconn/prog_firehose_lite.elf", + .edl_trigger = true, + .config = &modem_foxconn_sdx61_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = false, +}; + +static const struct mhi_pci_dev_info mhi_foxconn_t99w760_info = { + .name = "foxconn-t99w760", + .edl = "qcom/sdx35/foxconn/xbl_s_devprg_ns.melf", + .edl_trigger = true, + .config = &modem_foxconn_sdx61_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = false, +}; + static const struct mhi_channel_config mhi_mv3x_channels[] = { MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0), MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0), @@ -460,6 +754,7 @@ static const struct mhi_pci_dev_info mhi_sierra_em919x_info = { .config = &modem_sierra_em919x_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, + .mru_default = 32768, .sideband_wake = false, }; @@ -478,7 +773,7 @@ static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = { MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101) }; -static struct mhi_controller_config modem_telit_fn980_hw_v1_config = { +static const struct mhi_controller_config modem_telit_fn980_hw_v1_config = { .max_channels = 128, .timeout_ms = 20000, .num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels), @@ -538,13 +833,108 @@ static const struct mhi_pci_dev_info mhi_telit_fn990_info = { .mru_default = 32768, }; +static const struct mhi_pci_dev_info mhi_telit_fe990a_info = { + .name = "telit-fe990a", + .config = &modem_telit_fn990_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .sideband_wake = false, + .mru_default = 32768, +}; + +static const struct mhi_channel_config mhi_telit_fn920c04_channels[] = { + MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0), + 
MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0), + MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1), + MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1), + MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0), + MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0), + MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0), + MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0), + MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1), + MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1), + MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2), + MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 3), +}; + +static const struct mhi_controller_config modem_telit_fn920c04_config = { + .max_channels = 128, + .timeout_ms = 50000, + .num_channels = ARRAY_SIZE(mhi_telit_fn920c04_channels), + .ch_cfg = mhi_telit_fn920c04_channels, + .num_events = ARRAY_SIZE(mhi_telit_fn990_events), + .event_cfg = mhi_telit_fn990_events, +}; + +static const struct mhi_pci_dev_info mhi_telit_fn920c04_info = { + .name = "telit-fn920c04", + .config = &modem_telit_fn920c04_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .sideband_wake = false, + .mru_default = 32768, + .edl_trigger = true, +}; + +static const struct mhi_pci_dev_info mhi_telit_fn990b40_info = { + .name = "telit-fn990b40", + .config = &modem_telit_fn920c04_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .sideband_wake = false, + .mru_default = 32768, + .edl_trigger = true, +}; + +static const struct mhi_pci_dev_info mhi_telit_fe990b40_info = { + .name = "telit-fe990b40", + .config = &modem_telit_fn920c04_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .sideband_wake = false, + .mru_default = 32768, + .edl_trigger = true, +}; + +static const struct mhi_pci_dev_info mhi_netprisma_lcur57_info = { + .name = "netprisma-lcur57", + .edl = "qcom/prog_firehose_sdx24.mbn", + .config = &modem_quectel_em1xx_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = true, +}; + +static const struct mhi_pci_dev_info mhi_netprisma_fcun69_info = { + .name = "netprisma-fcun69", + .edl = "qcom/prog_firehose_sdx6x.elf", + .config = &modem_quectel_em1xx_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = true, +}; + /* Keep the list sorted based on the PID. 
New VID should be added as the last entry */ static const struct pci_device_id mhi_pci_id_table[] = { + {PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0116), + .driver_data = (kernel_ulong_t) &mhi_qcom_sa8775p_info }, + /* Telit FN920C04 (sdx35) */ + {PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x011a, 0x1c5d, 0x2020), + .driver_data = (kernel_ulong_t) &mhi_telit_fn920c04_info }, { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304), .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, PCI_VENDOR_ID_QCOM, 0x010c), + .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, /* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200), .driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info }, + /* EM929x (sdx65), use the same configuration as EM919x */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x18d7, 0x0301), + .driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info }, /* Telit FN980 hardware revision v1 */ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000), .driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info }, @@ -553,47 +943,107 @@ static const struct pci_device_id mhi_pci_id_table[] = { /* Telit FN990 */ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010), .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info }, + /* Telit FE990A */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015), + .driver_data = (kernel_ulong_t) &mhi_telit_fe990a_info }, + /* Foxconn T99W696, all variants */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, PCI_VENDOR_ID_FOXCONN, PCI_ANY_ID), + .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w696_info }, { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308), .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info }, - { PCI_DEVICE(0x1eac, 0x1001), /* EM120R-GL (sdx24) */ + /* Telit FN990B40 (sdx72) */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0309, 0x1c5d, 0x201a), + .driver_data = (kernel_ulong_t) &mhi_telit_fn990b40_info }, + /* Telit FE990B40 (sdx72) */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0309, 0x1c5d, 0x2025), + .driver_data = (kernel_ulong_t) &mhi_telit_fe990b40_info }, + { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0309), + .driver_data = (kernel_ulong_t) &mhi_qcom_sdx75_info }, + /* QDU100, x100-DU */ + { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0601), + .driver_data = (kernel_ulong_t) &mhi_qcom_qdu100_info }, + { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */ + .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, + { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1002), /* EM160R-GL (sdx24) */ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, - { PCI_DEVICE(0x1eac, 0x1002), /* EM160R-GL (sdx24) */ + /* RM520N-GL (sdx6x), eSIM */ + { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1004), + .driver_data = (kernel_ulong_t) &mhi_quectel_rm5xx_info }, + /* RM520N-GL (sdx6x), Lenovo variant */ + { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1007), + .driver_data = (kernel_ulong_t) &mhi_quectel_rm5xx_info }, + { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x100d), /* EM160R-GL (sdx24) */ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, - { PCI_DEVICE(0x1eac, 0x2001), /* EM120R-GL for FCCL (sdx24) */ + { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x2001), /* EM120R-GL for FCCL (sdx24) */ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, /* T99W175 (sdx55), Both for eSIM and Non-eSIM */ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab), - .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, + .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info }, /* DW5930e (sdx55), With 
eSIM, It's also T99W175 */ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b0), - .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, + .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5930e_info }, /* DW5930e (sdx55), Non-eSIM, It's also T99W175 */ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1), - .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, + .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5930e_info }, /* T99W175 (sdx55), Based on Qualcomm new baseline */ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf), - .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, + .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info }, /* T99W175 (sdx55) */ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0c3), - .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, + .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info }, /* T99W368 (sdx65) */ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d8), - .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info }, + .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w368_info }, /* T99W373 (sdx62) */ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d9), - .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info }, + .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w373_info }, + /* T99W510 (sdx24), variant 1 */ + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f0), + .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w510_info }, + /* T99W510 (sdx24), variant 2 */ + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f1), + .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w510_info }, + /* T99W510 (sdx24), variant 3 */ + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f2), + .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w510_info }, + /* DW5932e-eSIM (sdx62), With eSIM */ + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f5), + .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5932e_info }, + /* DW5932e (sdx62), Non-eSIM */ + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f9), + .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5932e_info }, + /* T99W640 (sdx72) */ + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe118), + .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w640_info }, + /* DW5934e(sdx72), With eSIM */ + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe11d), + .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5934e_info }, + /* DW5934e(sdx72), Non-eSIM */ + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe11e), + .driver_data = (kernel_ulong_t) &mhi_foxconn_dw5934e_info }, + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe123), + .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w760_info }, /* MV31-W (Cinterion) */ - { PCI_DEVICE(0x1269, 0x00b3), + { PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b3), .driver_data = (kernel_ulong_t) &mhi_mv31_info }, /* MV31-W (Cinterion), based on new baseline */ - { PCI_DEVICE(0x1269, 0x00b4), + { PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b4), .driver_data = (kernel_ulong_t) &mhi_mv31_info }, /* MV32-WA (Cinterion) */ - { PCI_DEVICE(0x1269, 0x00ba), + { PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00ba), .driver_data = (kernel_ulong_t) &mhi_mv32_info }, /* MV32-WB (Cinterion) */ - { PCI_DEVICE(0x1269, 0x00bb), + { PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00bb), .driver_data = (kernel_ulong_t) &mhi_mv32_info }, + /* T99W175 (sdx55), HP variant */ + { PCI_DEVICE(0x03f0, 0x0a6c), + .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w175_info }, + /* NETPRISMA LCUR57 (SDX24) */ + { PCI_DEVICE(PCI_VENDOR_ID_NETPRISMA, 0x1000), + .driver_data = (kernel_ulong_t) &mhi_netprisma_lcur57_info }, + /* NETPRISMA FCUN69 (SDX6X) */ + { PCI_DEVICE(PCI_VENDOR_ID_NETPRISMA, 0x1001), + .driver_data = (kernel_ulong_t) &mhi_netprisma_fcun69_info }, { } }; 
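The ID table above depends on match order: the PCI core (pci_match_id()) scans the entries linearly and takes the first hit, so the subsystem-qualified PCI_DEVICE_SUB() rows for the Sierra, Telit and Foxconn variants must sit before the catch-all PCI_DEVICE() row for the same VID:PID. A minimal sketch of the rule with two entries (struct example_info and the variable names are invented for illustration; the IDs are borrowed from the Telit FN990 rows above):

	#include <linux/pci.h>

	struct example_info {
		const char *name;
	};

	static const struct example_info telit_fn990_variant = { .name = "telit-fn990" };
	static const struct example_info qcom_sdx65_generic = { .name = "qcom-sdx65m" };

	static const struct pci_device_id example_id_table[] = {
		/* Subsystem-qualified entry first: a linear scan takes the first match */
		{ PCI_DEVICE_SUB(0x17cb, 0x0308, 0x1c5d, 0x2010),
		  .driver_data = (kernel_ulong_t)&telit_fn990_variant },
		/* Catch-all for every other subsystem ID of the same device */
		{ PCI_DEVICE(0x17cb, 0x0308),
		  .driver_data = (kernel_ulong_t)&qcom_sdx65_generic },
		{ }
	};

Listing the PCI_DEVICE() row first would shadow every PCI_DEVICE_SUB() variant behind it, which is why the new Telit and Foxconn entries in this hunk are inserted ahead of the generic Qualcomm IDs.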
MODULE_DEVICE_TABLE(pci, mhi_pci_id_table); @@ -609,6 +1059,7 @@ struct mhi_pci_device { struct work_struct recovery_work; struct timer_list health_check_timer; unsigned long status; + bool reset_on_remove; }; static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl, @@ -664,7 +1115,7 @@ static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl) struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev); u16 vendor = 0; - if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor)) + if (pci_read_config_word(pci_physfn(pdev), PCI_VENDOR_ID, &vendor)) return false; if (vendor == (u16) ~0 || vendor == 0) @@ -679,22 +1130,18 @@ static int mhi_pci_claim(struct mhi_controller *mhi_cntrl, struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev); int err; - err = pci_assign_resource(pdev, bar_num); - if (err) - return err; - err = pcim_enable_device(pdev); if (err) { dev_err(&pdev->dev, "failed to enable pci device: %d\n", err); return err; } - err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev)); - if (err) { + mhi_cntrl->regs = pcim_iomap_region(pdev, bar_num, pci_name(pdev)); + if (IS_ERR(mhi_cntrl->regs)) { + err = PTR_ERR(mhi_cntrl->regs); dev_err(&pdev->dev, "failed to map pci region: %d\n", err); return err; } - mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num]; mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num); err = dma_set_mask_and_coherent(&pdev->dev, dma_mask); @@ -721,7 +1168,7 @@ static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl, */ mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events; - nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI); + nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSIX | PCI_IRQ_MSI); if (nr_vectors < 0) { dev_err(&pdev->dev, "Error allocating MSI vectors %d\n", nr_vectors); @@ -779,7 +1226,9 @@ static void mhi_pci_recovery_work(struct work_struct *work) dev_warn(&pdev->dev, "device recovery started\n"); - del_timer(&mhi_pdev->health_check_timer); + if (pdev->is_physfn) + timer_delete(&mhi_pdev->health_check_timer); + pm_runtime_forbid(&pdev->dev); /* Clean up MHI state */ @@ -806,19 +1255,24 @@ static void mhi_pci_recovery_work(struct work_struct *work) dev_dbg(&pdev->dev, "Recovery completed\n"); set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status); - mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); + + if (pdev->is_physfn) + mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); + return; err_unprepare: mhi_unprepare_after_power_down(mhi_cntrl); err_try_reset: - if (pci_reset_function(pdev)) - dev_err(&pdev->dev, "Recovery failed\n"); + err = pci_try_reset_function(pdev); + if (err) + dev_err(&pdev->dev, "Recovery failed: %d\n", err); } static void health_check(struct timer_list *t) { - struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer); + struct mhi_pci_device *mhi_pdev = timer_container_of(mhi_pdev, t, + health_check_timer); struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) || @@ -835,12 +1289,47 @@ static void health_check(struct timer_list *t) mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); } +static int mhi_pci_generic_edl_trigger(struct mhi_controller *mhi_cntrl) +{ + void __iomem *base = mhi_cntrl->regs; + void __iomem *edl_db; + int ret; + u32 val; + + ret = mhi_device_get_sync(mhi_cntrl->mhi_dev); + if (ret) { + dev_err(mhi_cntrl->cntrl_dev, "Failed to wakeup the device\n"); + return ret; + } + + 
pm_wakeup_event(&mhi_cntrl->mhi_dev->dev, 0); + mhi_cntrl->runtime_get(mhi_cntrl); + + ret = mhi_get_channel_doorbell_offset(mhi_cntrl, &val); + if (ret) + goto err_get_chdb; + + edl_db = base + val + (8 * MHI_EDL_DB); + + mhi_cntrl->write_reg(mhi_cntrl, edl_db + 4, upper_32_bits(MHI_EDL_COOKIE)); + mhi_cntrl->write_reg(mhi_cntrl, edl_db, lower_32_bits(MHI_EDL_COOKIE)); + + mhi_soc_reset(mhi_cntrl); + +err_get_chdb: + mhi_cntrl->runtime_put(mhi_cntrl); + mhi_device_put(mhi_cntrl->mhi_dev); + + return ret; +} + static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data; const struct mhi_controller_config *mhi_cntrl_config; struct mhi_pci_device *mhi_pdev; struct mhi_controller *mhi_cntrl; + unsigned int dma_data_width; int err; dev_info(&pdev->dev, "MHI PCI device found: %s\n", info->name); @@ -851,14 +1340,24 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return -ENOMEM; INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work); - timer_setup(&mhi_pdev->health_check_timer, health_check, 0); - mhi_cntrl_config = info->config; + if (pdev->is_virtfn && info->vf_config) + mhi_cntrl_config = info->vf_config; + else + mhi_cntrl_config = info->config; + + /* Initialize health check monitor only for Physical functions */ + if (pdev->is_physfn) + timer_setup(&mhi_pdev->health_check_timer, health_check, 0); + mhi_cntrl = &mhi_pdev->mhi_cntrl; + dma_data_width = (pdev->is_virtfn && info->vf_dma_data_width) ? + info->vf_dma_data_width : info->dma_data_width; + mhi_cntrl->cntrl_dev = &pdev->dev; mhi_cntrl->iova_start = 0; - mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width); + mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(dma_data_width); mhi_cntrl->fw_image = info->fw; mhi_cntrl->edl_image = info->edl; @@ -868,6 +1367,13 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mhi_cntrl->runtime_get = mhi_pci_runtime_get; mhi_cntrl->runtime_put = mhi_pci_runtime_put; mhi_cntrl->mru = info->mru_default; + mhi_cntrl->name = info->name; + + if (pdev->is_physfn) + mhi_pdev->reset_on_remove = info->reset_on_remove; + + if (info->edl_trigger) + mhi_cntrl->edl_trigger = mhi_pci_generic_edl_trigger; if (info->sideband_wake) { mhi_cntrl->wake_get = mhi_pci_wake_get_nop; @@ -875,7 +1381,7 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop; } - err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width)); + err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(dma_data_width)); if (err) return err; @@ -892,11 +1398,9 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mhi_pdev->pci_state = pci_store_saved_state(pdev); pci_load_saved_state(pdev, NULL); - pci_enable_pcie_error_reporting(pdev); - err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config); if (err) - goto err_disable_reporting; + return err; /* MHI bus does not power up the controller by default */ err = mhi_prepare_for_power_up(mhi_cntrl); @@ -914,10 +1418,11 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status); /* start health check */ - mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); + if (pdev->is_physfn) + mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); - /* Only allow runtime-suspend if PME capable (for 
wakeup) */ - if (pci_pme_capable(pdev, PCI_D3hot)) { + /* Allow runtime suspend only if both PME from D3Hot and M3 are supported */ + if (pci_pme_capable(pdev, PCI_D3hot) && !(info->no_m3)) { pm_runtime_set_autosuspend_delay(&pdev->dev, 2000); pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_mark_last_busy(&pdev->dev); @@ -930,8 +1435,6 @@ err_unprepare: mhi_unprepare_after_power_down(mhi_cntrl); err_unregister: mhi_unregister_controller(mhi_cntrl); -err_disable_reporting: - pci_disable_pcie_error_reporting(pdev); return err; } @@ -941,7 +1444,10 @@ static void mhi_pci_remove(struct pci_dev *pdev) struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev); struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl; - del_timer_sync(&mhi_pdev->health_check_timer); + pci_disable_sriov(pdev); + + if (pdev->is_physfn) + timer_delete_sync(&mhi_pdev->health_check_timer); cancel_work_sync(&mhi_pdev->recovery_work); if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { @@ -953,8 +1459,10 @@ static void mhi_pci_remove(struct pci_dev *pdev) if (pci_pme_capable(pdev, PCI_D3hot)) pm_runtime_get_noresume(&pdev->dev); + if (mhi_pdev->reset_on_remove) + mhi_soc_reset(mhi_cntrl); + mhi_unregister_controller(mhi_cntrl); - pci_disable_pcie_error_reporting(pdev); } static void mhi_pci_shutdown(struct pci_dev *pdev) @@ -970,7 +1478,8 @@ static void mhi_pci_reset_prepare(struct pci_dev *pdev) dev_info(&pdev->dev, "reset\n"); - del_timer(&mhi_pdev->health_check_timer); + if (pdev->is_physfn) + timer_delete(&mhi_pdev->health_check_timer); /* Clean up MHI state */ if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { @@ -1015,7 +1524,8 @@ static void mhi_pci_reset_done(struct pci_dev *pdev) } set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status); - mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); + if (pdev->is_physfn) + mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); } static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev, @@ -1080,7 +1590,9 @@ static int __maybe_unused mhi_pci_runtime_suspend(struct device *dev) if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status)) return 0; - del_timer(&mhi_pdev->health_check_timer); + if (pdev->is_physfn) + timer_delete(&mhi_pdev->health_check_timer); + cancel_work_sync(&mhi_pdev->recovery_work); if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) || @@ -1131,7 +1643,8 @@ static int __maybe_unused mhi_pci_runtime_resume(struct device *dev) } /* Resume health check */ - mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); + if (pdev->is_physfn) + mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD); /* It can be a remote wakeup (no mhi runtime_get), update access time */ pm_runtime_mark_last_busy(dev); @@ -1217,7 +1730,8 @@ static struct pci_driver mhi_pci_driver = { .remove = mhi_pci_remove, .shutdown = mhi_pci_shutdown, .err_handler = &mhi_pci_err_handler, - .driver.pm = &mhi_pci_pm_ops + .driver.pm = &mhi_pci_pm_ops, + .sriov_configure = pci_sriov_configure_simple, }; module_pci_driver(mhi_pci_driver); diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c index 4a42186ff111..b4ef115189b5 100644 --- a/drivers/bus/mhi/host/pm.c +++ b/drivers/bus/mhi/host/pm.c @@ -15,6 +15,7 @@ #include <linux/slab.h> #include <linux/wait.h> #include "internal.h" +#include "trace.h" /* * Not all MHI state transitions are synchronous. 
Transitions like Linkdown, @@ -36,7 +37,10 @@ * M0 <--> M0 * M0 -> FW_DL_ERR * M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0 - * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR + * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS + * SYS_ERR_PROCESS -> SYS_ERR_FAIL + * SYS_ERR_FAIL -> SYS_ERR_DETECT + * SYS_ERR_PROCESS --> POR * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT * SHUTDOWN_PROCESS -> DISABLE * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT @@ -93,7 +97,12 @@ static const struct mhi_pm_transitions dev_state_transitions[] = { }, { MHI_PM_SYS_ERR_PROCESS, - MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_POR | MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_SYS_ERR_FAIL, + MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT }, /* L2 States */ @@ -123,6 +132,7 @@ enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cn if (unlikely(!(dev_state_transitions[index].to_states & state))) return cur_state; + trace_mhi_tryset_pm_state(mhi_cntrl, state); mhi_cntrl->pm_state = state; return mhi_cntrl->pm_state; } @@ -163,6 +173,7 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl) enum mhi_pm_state cur_state; struct device *dev = &mhi_cntrl->mhi_dev->dev; u32 interval_us = 25000; /* poll register field every 25 milliseconds */ + u32 timeout_ms; int ret, i; /* Check if device entered error state */ @@ -173,14 +184,18 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl) /* Wait for RESET to be cleared and READY bit to be set by the device */ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, - MHICTRL_RESET_MASK, 0, interval_us); + MHICTRL_RESET_MASK, 0, interval_us, + mhi_cntrl->timeout_ms); if (ret) { dev_err(dev, "Device failed to clear MHI Reset\n"); return ret; } + timeout_ms = mhi_cntrl->ready_timeout_ms ? 
+ mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms; ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, - MHISTATUS_READY_MASK, 1, interval_us); + MHISTATUS_READY_MASK, 1, interval_us, + timeout_ms); if (ret) { dev_err(dev, "Device failed to enter MHI Ready\n"); return ret; @@ -301,7 +316,8 @@ int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl) read_lock_irq(&mhi_chan->lock); /* Only ring DB if ring is not empty */ - if (tre_ring->base && tre_ring->wp != tre_ring->rp) + if (tre_ring->base && tre_ring->wp != tre_ring->rp && + mhi_chan->ch_state == MHI_CH_STATE_ENABLED) mhi_ring_chan_db(mhi_cntrl, mhi_chan); read_unlock_irq(&mhi_chan->lock); } @@ -402,6 +418,7 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl) device_for_each_child(&mhi_cntrl->mhi_dev->dev, ¤t_ee, mhi_destroy_device); mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE); + mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee); /* Force MHI to be in M0 state before continuing */ ret = __mhi_device_get_sync(mhi_cntrl); @@ -452,7 +469,8 @@ error_mission_mode: } /* Handle shutdown transitions */ -static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl) +static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl, + bool destroy_device) { enum mhi_pm_state cur_state; struct mhi_event *mhi_event; @@ -469,12 +487,16 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl) /* Trigger MHI RESET so that the device will not access host memory */ if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) { + /* Skip MHI RESET if in RDDM state */ + if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM) + goto skip_mhi_reset; + dev_dbg(dev, "Triggering MHI Reset in device\n"); mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); /* Wait for the reset bit to be cleared by the device */ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, - MHICTRL_RESET_MASK, 0, 25000); + MHICTRL_RESET_MASK, 0, 25000, mhi_cntrl->timeout_ms); if (ret) dev_err(dev, "Device failed to clear MHI Reset\n"); @@ -487,13 +509,14 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl) if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) { /* wait for ready to be set */ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, - MHISTATUS, - MHISTATUS_READY_MASK, 1, 25000); + MHISTATUS, MHISTATUS_READY_MASK, + 1, 25000, mhi_cntrl->timeout_ms); if (ret) dev_err(dev, "Device failed to enter READY state\n"); } } +skip_mhi_reset: dev_dbg(dev, "Waiting for all pending event ring processing to complete\n"); mhi_event = mhi_cntrl->mhi_event; @@ -509,8 +532,16 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl) dev_dbg(dev, "Waiting for all pending threads to complete\n"); wake_up_all(&mhi_cntrl->state_event); - dev_dbg(dev, "Reset all active channels and remove MHI devices\n"); - device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device); + /* + * Only destroy the 'struct device' for channels if indicated by the + * 'destroy_device' flag. Because, during system suspend or hibernation + * state, there is no need to destroy the 'struct device' as the endpoint + * device would still be physically attached to the machine. 
+ */ + if (destroy_device) { + dev_dbg(dev, "Reset all active channels and remove MHI devices\n"); + device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device); + } mutex_lock(&mhi_cntrl->pm_mutex); @@ -572,6 +603,7 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl) struct mhi_cmd *mhi_cmd; struct mhi_event_ctxt *er_ctxt; struct device *dev = &mhi_cntrl->mhi_dev->dev; + bool reset_device = false; int ret, i; dev_dbg(dev, "Transitioning from PM state: %s to: %s\n", @@ -600,8 +632,25 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl) /* Wake up threads waiting for state transition */ wake_up_all(&mhi_cntrl->state_event); - /* Trigger MHI RESET so that the device will not access host memory */ + mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee); + if (MHI_REG_ACCESS_VALID(prev_state)) { + /* + * If the device is in PBL or SBL, it will only respond to + * RESET if the device is in SYSERR state. SYSERR might + * already be cleared at this point. + */ + enum mhi_state cur_state = mhi_get_mhi_state(mhi_cntrl); + enum mhi_ee_type cur_ee = mhi_get_exec_env(mhi_cntrl); + + if (cur_state == MHI_STATE_SYS_ERR) + reset_device = true; + else if (cur_ee != MHI_EE_PBL && cur_ee != MHI_EE_SBL) + reset_device = true; + } + + /* Trigger MHI RESET so that the device will not access host memory */ + if (reset_device) { u32 in_reset = -1; unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms); @@ -618,7 +667,13 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl) !in_reset, timeout); if (!ret || in_reset) { dev_err(dev, "Device failed to exit MHI Reset state\n"); - goto exit_sys_error_transition; + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_SYS_ERR_FAIL); + write_unlock_irq(&mhi_cntrl->pm_lock); + /* Shutdown may have occurred, otherwise cleanup now */ + if (cur_state != MHI_PM_SYS_ERR_FAIL) + goto exit_sys_error_transition; } /* @@ -747,7 +802,6 @@ void mhi_pm_st_worker(struct work_struct *work) struct mhi_controller *mhi_cntrl = container_of(work, struct mhi_controller, st_worker); - struct device *dev = &mhi_cntrl->mhi_dev->dev; spin_lock_irq(&mhi_cntrl->transition_lock); list_splice_tail_init(&mhi_cntrl->transition_list, &head); @@ -755,8 +809,8 @@ void mhi_pm_st_worker(struct work_struct *work) list_for_each_entry_safe(itr, tmp, &head, node) { list_del(&itr->node); - dev_dbg(dev, "Handling state transition: %s\n", - TO_DEV_STATE_TRANS_STR(itr->state)); + + trace_mhi_pm_st_transition(mhi_cntrl, itr->state); switch (itr->state) { case DEV_ST_TRANSITION_PBL: @@ -778,6 +832,8 @@ void mhi_pm_st_worker(struct work_struct *work) mhi_create_devices(mhi_cntrl); if (mhi_cntrl->fbc_download) mhi_download_amss_image(mhi_cntrl); + + mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee); break; case DEV_ST_TRANSITION_MISSION_MODE: mhi_pm_mission_mode_transition(mhi_cntrl); @@ -787,6 +843,7 @@ void mhi_pm_st_worker(struct work_struct *work) mhi_cntrl->ee = MHI_EE_FP; write_unlock_irq(&mhi_cntrl->pm_lock); mhi_create_devices(mhi_cntrl); + mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee); break; case DEV_ST_TRANSITION_READY: mhi_ready_state_transition(mhi_cntrl); @@ -795,7 +852,10 @@ void mhi_pm_st_worker(struct work_struct *work) mhi_pm_sys_error_transition(mhi_cntrl); break; case DEV_ST_TRANSITION_DISABLE: - mhi_pm_disable_transition(mhi_cntrl); + mhi_pm_disable_transition(mhi_cntrl, false); + break; + case DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE: + mhi_pm_disable_transition(mhi_cntrl, true); 
break; default: break; @@ -1105,7 +1165,8 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl) if (state == MHI_STATE_SYS_ERR) { mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, - MHICTRL_RESET_MASK, 0, interval_us); + MHICTRL_RESET_MASK, 0, interval_us, + mhi_cntrl->timeout_ms); if (ret) { dev_info(dev, "Failed to reset MHI due to syserr state\n"); goto error_exit; @@ -1148,7 +1209,8 @@ error_exit: } EXPORT_SYMBOL_GPL(mhi_async_power_up); -void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful) +static void __mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful, + bool destroy_device) { enum mhi_pm_state cur_state, transition_state; struct device *dev = &mhi_cntrl->mhi_dev->dev; @@ -1184,26 +1246,49 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful) write_unlock_irq(&mhi_cntrl->pm_lock); mutex_unlock(&mhi_cntrl->pm_mutex); - mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE); + mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee); + + if (destroy_device) + mhi_queue_state_transition(mhi_cntrl, + DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE); + else + mhi_queue_state_transition(mhi_cntrl, + DEV_ST_TRANSITION_DISABLE); /* Wait for shutdown to complete */ flush_work(&mhi_cntrl->st_worker); disable_irq(mhi_cntrl->irq[0]); } + +void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful) +{ + __mhi_power_down(mhi_cntrl, graceful, true); +} EXPORT_SYMBOL_GPL(mhi_power_down); +void mhi_power_down_keep_dev(struct mhi_controller *mhi_cntrl, + bool graceful) +{ + __mhi_power_down(mhi_cntrl, graceful, false); +} +EXPORT_SYMBOL_GPL(mhi_power_down_keep_dev); + int mhi_sync_power_up(struct mhi_controller *mhi_cntrl) { int ret = mhi_async_power_up(mhi_cntrl); + u32 timeout_ms; if (ret) return ret; + /* Some devices need more time to set ready during power up */ + timeout_ms = mhi_cntrl->ready_timeout_ms ? + mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms; wait_event_timeout(mhi_cntrl->state_event, MHI_IN_MISSION_MODE(mhi_cntrl->ee) || - MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), - msecs_to_jiffies(mhi_cntrl->timeout_ms)); + MHI_PM_FATAL_ERROR(mhi_cntrl->pm_state), + msecs_to_jiffies(timeout_ms)); ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 
0 : -ETIMEDOUT; if (ret) @@ -1235,20 +1320,6 @@ int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl) } EXPORT_SYMBOL_GPL(mhi_force_rddm_mode); -void mhi_device_get(struct mhi_device *mhi_dev) -{ - struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; - - mhi_dev->dev_wake++; - read_lock_bh(&mhi_cntrl->pm_lock); - if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) - mhi_trigger_resume(mhi_cntrl); - - mhi_cntrl->wake_get(mhi_cntrl, true); - read_unlock_bh(&mhi_cntrl->pm_lock); -} -EXPORT_SYMBOL_GPL(mhi_device_get); - int mhi_device_get_sync(struct mhi_device *mhi_dev) { struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; @@ -1275,3 +1346,22 @@ void mhi_device_put(struct mhi_device *mhi_dev) read_unlock_bh(&mhi_cntrl->pm_lock); } EXPORT_SYMBOL_GPL(mhi_device_put); + +void mhi_uevent_notify(struct mhi_controller *mhi_cntrl, enum mhi_ee_type ee) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + char *buf[2]; + int ret; + + buf[0] = kasprintf(GFP_KERNEL, "EXEC_ENV=%s", TO_MHI_EXEC_STR(ee)); + buf[1] = NULL; + + if (!buf[0]) + return; + + ret = kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, buf); + if (ret) + dev_err(dev, "Failed to send %s uevent\n", TO_MHI_EXEC_STR(ee)); + + kfree(buf[0]); +} diff --git a/drivers/bus/mhi/host/trace.h b/drivers/bus/mhi/host/trace.h new file mode 100644 index 000000000000..3e0c41777429 --- /dev/null +++ b/drivers/bus/mhi/host/trace.h @@ -0,0 +1,283 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mhi_host + +#if !defined(_TRACE_EVENT_MHI_HOST_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_EVENT_MHI_HOST_H + +#include <linux/byteorder/generic.h> +#include <linux/tracepoint.h> +#include <linux/trace_seq.h> +#include "../common.h" +#include "internal.h" + +#undef mhi_state +#undef mhi_state_end + +#define mhi_state(a, b) TRACE_DEFINE_ENUM(MHI_STATE_##a); +#define mhi_state_end(a, b) TRACE_DEFINE_ENUM(MHI_STATE_##a); + +MHI_STATE_LIST + +#undef mhi_state +#undef mhi_state_end + +#define mhi_state(a, b) { MHI_STATE_##a, b }, +#define mhi_state_end(a, b) { MHI_STATE_##a, b } + +#undef mhi_pm_state +#undef mhi_pm_state_end + +#define mhi_pm_state(a, b) TRACE_DEFINE_ENUM(MHI_PM_STATE_##a); +#define mhi_pm_state_end(a, b) TRACE_DEFINE_ENUM(MHI_PM_STATE_##a); + +MHI_PM_STATE_LIST + +#undef mhi_pm_state +#undef mhi_pm_state_end + +#define mhi_pm_state(a, b) { MHI_PM_STATE_##a, b }, +#define mhi_pm_state_end(a, b) { MHI_PM_STATE_##a, b } + +#undef mhi_ee +#undef mhi_ee_end + +#define mhi_ee(a, b) TRACE_DEFINE_ENUM(MHI_EE_##a); +#define mhi_ee_end(a, b) TRACE_DEFINE_ENUM(MHI_EE_##a); + +MHI_EE_LIST + +#undef mhi_ee +#undef mhi_ee_end + +#define mhi_ee(a, b) { MHI_EE_##a, b }, +#define mhi_ee_end(a, b) { MHI_EE_##a, b } + +#undef ch_state_type +#undef ch_state_type_end + +#define ch_state_type(a, b) TRACE_DEFINE_ENUM(MHI_CH_STATE_TYPE_##a); +#define ch_state_type_end(a, b) TRACE_DEFINE_ENUM(MHI_CH_STATE_TYPE_##a); + +MHI_CH_STATE_TYPE_LIST + +#undef ch_state_type +#undef ch_state_type_end + +#define ch_state_type(a, b) { MHI_CH_STATE_TYPE_##a, b }, +#define ch_state_type_end(a, b) { MHI_CH_STATE_TYPE_##a, b } + +#undef dev_st_trans +#undef dev_st_trans_end + +#define dev_st_trans(a, b) TRACE_DEFINE_ENUM(DEV_ST_TRANSITION_##a); +#define dev_st_trans_end(a, b) TRACE_DEFINE_ENUM(DEV_ST_TRANSITION_##a); + +DEV_ST_TRANSITION_LIST + +#undef dev_st_trans +#undef dev_st_trans_end + +#define dev_st_trans(a, b) { DEV_ST_TRANSITION_##a, b }, 
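Each block in this new trace header is the same X-macro device: a single list macro is expanded twice, once to emit TRACE_DEFINE_ENUM() for every constant (so trace tooling can resolve the raw values), and once to build the { value, "name" } pairs that __print_symbolic() consumes in the TP_printk() format strings further down. The trick reduced to a standalone toy, with invented names:

        #define COLOR_LIST              \
                color(RED, "red")       \
                color_end(BLUE, "blue")

        /* first expansion: declare the enum constants */
        #define color(a, b)     COLOR_##a,
        #define color_end(a, b) COLOR_##a
        enum color { COLOR_LIST };
        #undef color
        #undef color_end

        /* second expansion: a { value, name } table for pretty-printing */
        #define color(a, b)     { COLOR_##a, b },
        #define color_end(a, b) { COLOR_##a, b }
        static const struct { int val; const char *name; } color_names[] = { COLOR_LIST };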
+#define dev_st_trans_end(a, b) { DEV_ST_TRANSITION_##a, b } + +#define TPS(x) tracepoint_string(x) + +TRACE_EVENT(mhi_gen_tre, + + TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, + struct mhi_ring_element *mhi_tre), + + TP_ARGS(mhi_cntrl, mhi_chan, mhi_tre), + + TP_STRUCT__entry( + __string(name, mhi_cntrl->mhi_dev->name) + __field(int, ch_num) + __field(void *, wp) + __field(uint64_t, tre_ptr) + __field(uint32_t, dword0) + __field(uint32_t, dword1) + ), + + TP_fast_assign( + __assign_str(name); + __entry->ch_num = mhi_chan->chan; + __entry->wp = mhi_tre; + __entry->tre_ptr = le64_to_cpu(mhi_tre->ptr); + __entry->dword0 = le32_to_cpu(mhi_tre->dword[0]); + __entry->dword1 = le32_to_cpu(mhi_tre->dword[1]); + ), + + TP_printk("%s: Chan: %d TRE: 0x%p TRE buf: 0x%llx DWORD0: 0x%08x DWORD1: 0x%08x\n", + __get_str(name), __entry->ch_num, __entry->wp, __entry->tre_ptr, + __entry->dword0, __entry->dword1) +); + +TRACE_EVENT(mhi_intvec_states, + + TP_PROTO(struct mhi_controller *mhi_cntrl, int dev_ee, int dev_state), + + TP_ARGS(mhi_cntrl, dev_ee, dev_state), + + TP_STRUCT__entry( + __string(name, mhi_cntrl->mhi_dev->name) + __field(int, local_ee) + __field(int, state) + __field(int, dev_ee) + __field(int, dev_state) + ), + + TP_fast_assign( + __assign_str(name); + __entry->local_ee = mhi_cntrl->ee; + __entry->state = mhi_cntrl->dev_state; + __entry->dev_ee = dev_ee; + __entry->dev_state = dev_state; + ), + + TP_printk("%s: Local EE: %s State: %s Device EE: %s Dev State: %s\n", + __get_str(name), + __print_symbolic(__entry->local_ee, MHI_EE_LIST), + __print_symbolic(__entry->state, MHI_STATE_LIST), + __print_symbolic(__entry->dev_ee, MHI_EE_LIST), + __print_symbolic(__entry->dev_state, MHI_STATE_LIST)) +); + +TRACE_EVENT(mhi_tryset_pm_state, + + TP_PROTO(struct mhi_controller *mhi_cntrl, int pm_state), + + TP_ARGS(mhi_cntrl, pm_state), + + TP_STRUCT__entry( + __string(name, mhi_cntrl->mhi_dev->name) + __field(int, pm_state) + ), + + TP_fast_assign( + __assign_str(name); + if (pm_state) + pm_state = __fls(pm_state); + __entry->pm_state = pm_state; + ), + + TP_printk("%s: PM state: %s\n", __get_str(name), + __print_symbolic(__entry->pm_state, MHI_PM_STATE_LIST)) +); + +DECLARE_EVENT_CLASS(mhi_process_event_ring, + + TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_ring_element *rp), + + TP_ARGS(mhi_cntrl, rp), + + TP_STRUCT__entry( + __string(name, mhi_cntrl->mhi_dev->name) + __field(uint32_t, dword0) + __field(uint32_t, dword1) + __field(int, state) + __field(uint64_t, ptr) + __field(void *, rp) + ), + + TP_fast_assign( + __assign_str(name); + __entry->rp = rp; + __entry->ptr = le64_to_cpu(rp->ptr); + __entry->dword0 = le32_to_cpu(rp->dword[0]); + __entry->dword1 = le32_to_cpu(rp->dword[1]); + __entry->state = MHI_TRE_GET_EV_STATE(rp); + ), + + TP_printk("%s: TRE: 0x%p TRE buf: 0x%llx DWORD0: 0x%08x DWORD1: 0x%08x State: %s\n", + __get_str(name), __entry->rp, __entry->ptr, __entry->dword0, + __entry->dword1, __print_symbolic(__entry->state, MHI_STATE_LIST)) +); + +DEFINE_EVENT(mhi_process_event_ring, mhi_data_event, + + TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_ring_element *rp), + + TP_ARGS(mhi_cntrl, rp) +); + +DEFINE_EVENT(mhi_process_event_ring, mhi_ctrl_event, + + TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_ring_element *rp), + + TP_ARGS(mhi_cntrl, rp) +); + +DECLARE_EVENT_CLASS(mhi_update_channel_state, + + TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, int state, + const char *reason), + + TP_ARGS(mhi_cntrl, mhi_chan, 
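A detail that is easy to misread in the TP_fast_assign() blocks above: __assign_str(name) takes a single argument. On newer kernels the source string is remembered from the matching __string() declaration, so spelling it twice is no longer needed. Annotated excerpt of the pairing, not compilable on its own:

        TP_STRUCT__entry(
                __string(name, mhi_cntrl->mhi_dev->name) /* records size and source */
        ),
        TP_fast_assign(
                __assign_str(name);     /* copies from the source captured above */
        ),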
state, reason), + + TP_STRUCT__entry( + __string(name, mhi_cntrl->mhi_dev->name) + __field(int, ch_num) + __field(int, state) + __field(const char *, reason) + ), + + TP_fast_assign( + __assign_str(name); + __entry->ch_num = mhi_chan->chan; + __entry->state = state; + __entry->reason = reason; + ), + + TP_printk("%s: chan%d: %s state to: %s\n", + __get_str(name), __entry->ch_num, __entry->reason, + __print_symbolic(__entry->state, MHI_CH_STATE_TYPE_LIST)) +); + +DEFINE_EVENT(mhi_update_channel_state, mhi_channel_command_start, + + TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, int state, + const char *reason), + + TP_ARGS(mhi_cntrl, mhi_chan, state, reason) +); + +DEFINE_EVENT(mhi_update_channel_state, mhi_channel_command_end, + + TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, int state, + const char *reason), + + TP_ARGS(mhi_cntrl, mhi_chan, state, reason) +); + +TRACE_EVENT(mhi_pm_st_transition, + + TP_PROTO(struct mhi_controller *mhi_cntrl, int state), + + TP_ARGS(mhi_cntrl, state), + + TP_STRUCT__entry( + __string(name, mhi_cntrl->mhi_dev->name) + __field(int, state) + ), + + TP_fast_assign( + __assign_str(name); + __entry->state = state; + ), + + TP_printk("%s: Handling state transition: %s\n", __get_str(name), + __print_symbolic(__entry->state, DEV_ST_TRANSITION_LIST)) +); + +#endif +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../drivers/bus/mhi/host +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +#include <trace/define_trace.h> diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c index fca0d0669aa9..12dd32fd0b62 100644 --- a/drivers/bus/mips_cdmm.c +++ b/drivers/bus/mips_cdmm.c @@ -37,7 +37,7 @@ /* Each block of device registers is 64 bytes */ #define CDMM_DRB_SIZE 64 -#define to_mips_cdmm_driver(d) container_of(d, struct mips_cdmm_driver, drv) +#define to_mips_cdmm_driver(d) container_of_const(d, struct mips_cdmm_driver, drv) /* Default physical base address */ static phys_addr_t mips_cdmm_default_base; @@ -59,17 +59,17 @@ mips_cdmm_lookup(const struct mips_cdmm_device_id *table, return ret ? 
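These mips_cdmm changes track the driver-core constification: bus match() and uevent() callbacks now receive const pointers, and container_of_const() lets the downcast keep the qualifier instead of silently casting it away. A toy bus illustrating the shape, with invented types:

        #include <linux/container_of.h>
        #include <linux/device.h>

        struct mybus_driver {
                const void *id_table;
                struct device_driver drv;
        };

        static int mybus_match(struct device *dev, const struct device_driver *drv)
        {
                /* const in, const out */
                const struct mybus_driver *mdrv =
                        container_of_const(drv, struct mybus_driver, drv);

                return mdrv->id_table != NULL;
        }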
table : NULL; } -static int mips_cdmm_match(struct device *dev, struct device_driver *drv) +static int mips_cdmm_match(struct device *dev, const struct device_driver *drv) { struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev); - struct mips_cdmm_driver *cdrv = to_mips_cdmm_driver(drv); + const struct mips_cdmm_driver *cdrv = to_mips_cdmm_driver(drv); return mips_cdmm_lookup(cdrv->id_table, cdev) != NULL; } -static int mips_cdmm_uevent(struct device *dev, struct kobj_uevent_env *env) +static int mips_cdmm_uevent(const struct device *dev, struct kobj_uevent_env *env) { - struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev); + const struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev); int retval = 0; retval = add_uevent_var(env, "CDMM_CPU=%u", cdev->cpu); @@ -118,7 +118,7 @@ static struct attribute *mips_cdmm_dev_attrs[] = { }; ATTRIBUTE_GROUPS(mips_cdmm_dev); -struct bus_type mips_cdmm_bustype = { +const struct bus_type mips_cdmm_bustype = { .name = "cdmm", .dev_groups = mips_cdmm_dev_groups, .match = mips_cdmm_match, diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c index 5eb0fe73ddc4..7ce61d629a87 100644 --- a/drivers/bus/moxtet.c +++ b/drivers/bus/moxtet.c @@ -83,10 +83,10 @@ static const struct attribute_group *moxtet_dev_groups[] = { NULL, }; -static int moxtet_match(struct device *dev, struct device_driver *drv) +static int moxtet_match(struct device *dev, const struct device_driver *drv) { struct moxtet_device *mdev = to_moxtet_device(dev); - struct moxtet_driver *tdrv = to_moxtet_driver(drv); + const struct moxtet_driver *tdrv = to_moxtet_driver(drv); const enum turris_mox_module_id *t; if (of_driver_match_device(dev, drv)) @@ -102,7 +102,7 @@ static int moxtet_match(struct device *dev, struct device_driver *drv) return 0; } -static struct bus_type moxtet_bus_type = { +static const struct bus_type moxtet_bus_type = { .name = "moxtet", .dev_groups = moxtet_dev_groups, .match = moxtet_match, @@ -484,7 +484,6 @@ static const struct file_operations input_fops = { .owner = THIS_MODULE, .open = moxtet_debug_open, .read = input_read, - .llseek = no_llseek, }; static ssize_t output_read(struct file *file, char __user *buf, size_t len, @@ -549,7 +548,6 @@ static const struct file_operations output_fops = { .open = moxtet_debug_open, .read = output_read, .write = output_write, - .llseek = no_llseek, }; static int moxtet_register_debugfs(struct moxtet *moxtet) @@ -659,7 +657,7 @@ static void moxtet_irq_print_chip(struct irq_data *d, struct seq_file *p) id = moxtet->modules[pos->idx]; - seq_printf(p, " moxtet-%s.%i#%i", mox_module_name(id), pos->idx, + seq_printf(p, "moxtet-%s.%i#%i", mox_module_name(id), pos->idx, pos->bit); } @@ -739,9 +737,8 @@ static int moxtet_irq_setup(struct moxtet *moxtet) { int i, ret; - moxtet->irq.domain = irq_domain_add_simple(moxtet->dev->of_node, - MOXTET_NIRQS, 0, - &moxtet_irq_domain, moxtet); + moxtet->irq.domain = irq_domain_create_simple(dev_fwnode(moxtet->dev), MOXTET_NIRQS, 0, + &moxtet_irq_domain, moxtet); if (moxtet->irq.domain == NULL) { dev_err(moxtet->dev, "Could not add IRQ domain\n"); return -ENOMEM; @@ -755,7 +752,7 @@ static int moxtet_irq_setup(struct moxtet *moxtet) moxtet->irq.masked = ~0; ret = request_threaded_irq(moxtet->dev_irq, NULL, moxtet_irq_thread_fn, - IRQF_ONESHOT, "moxtet", moxtet); + IRQF_SHARED | IRQF_ONESHOT, "moxtet", moxtet); if (ret < 0) goto err_free; @@ -830,6 +827,12 @@ static void moxtet_remove(struct spi_device *spi) mutex_destroy(&moxtet->lock); } +static const struct spi_device_id moxtet_spi_ids[] = { 
+ { "moxtet" }, + { }, +}; +MODULE_DEVICE_TABLE(spi, moxtet_spi_ids); + static const struct of_device_id moxtet_dt_ids[] = { { .compatible = "cznic,moxtet" }, {}, @@ -841,6 +844,7 @@ static struct spi_driver moxtet_spi_driver = { .name = "moxtet", .of_match_table = moxtet_dt_ids, }, + .id_table = moxtet_spi_ids, .probe = moxtet_probe, .remove = moxtet_remove, }; diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c index d51573ac525e..dd94145c9b22 100644 --- a/drivers/bus/mvebu-mbus.c +++ b/drivers/bus/mvebu-mbus.c @@ -1006,7 +1006,7 @@ static __init int mvebu_mbus_debugfs_init(void) } fs_initcall(mvebu_mbus_debugfs_init); -static int mvebu_mbus_suspend(void) +static int mvebu_mbus_suspend(void *data) { struct mvebu_mbus_state *s = &mbus_state; int win; @@ -1040,7 +1040,7 @@ static int mvebu_mbus_suspend(void) return 0; } -static void mvebu_mbus_resume(void) +static void mvebu_mbus_resume(void *data) { struct mvebu_mbus_state *s = &mbus_state; int win; @@ -1069,9 +1069,13 @@ static void mvebu_mbus_resume(void) } } -static struct syscore_ops mvebu_mbus_syscore_ops = { - .suspend = mvebu_mbus_suspend, - .resume = mvebu_mbus_resume, +static const struct syscore_ops mvebu_mbus_syscore_ops = { + .suspend = mvebu_mbus_suspend, + .resume = mvebu_mbus_resume, +}; + +static struct syscore mvebu_mbus_syscore = { + .ops = &mvebu_mbus_syscore_ops, }; static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus, @@ -1118,7 +1122,7 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus, writel(UNIT_SYNC_BARRIER_ALL, mbus->mbuswins_base + UNIT_SYNC_BARRIER_OFF); - register_syscore_ops(&mvebu_mbus_syscore_ops); + register_syscore(&mvebu_mbus_syscore); return 0; } @@ -1179,74 +1183,32 @@ static int __init mbus_dt_setup_win(struct mvebu_mbus_state *mbus, return 0; } -static int __init -mbus_parse_ranges(struct device_node *node, - int *addr_cells, int *c_addr_cells, int *c_size_cells, - int *cell_count, const __be32 **ranges_start, - const __be32 **ranges_end) -{ - const __be32 *prop; - int ranges_len, tuple_len; - - /* Allow a node with no 'ranges' property */ - *ranges_start = of_get_property(node, "ranges", &ranges_len); - if (*ranges_start == NULL) { - *addr_cells = *c_addr_cells = *c_size_cells = *cell_count = 0; - *ranges_start = *ranges_end = NULL; - return 0; - } - *ranges_end = *ranges_start + ranges_len / sizeof(__be32); - - *addr_cells = of_n_addr_cells(node); - - prop = of_get_property(node, "#address-cells", NULL); - *c_addr_cells = be32_to_cpup(prop); - - prop = of_get_property(node, "#size-cells", NULL); - *c_size_cells = be32_to_cpup(prop); - - *cell_count = *addr_cells + *c_addr_cells + *c_size_cells; - tuple_len = (*cell_count) * sizeof(__be32); - - if (ranges_len % tuple_len) { - pr_warn("malformed ranges entry '%pOFn'\n", node); - return -EINVAL; - } - return 0; -} - static int __init mbus_dt_setup(struct mvebu_mbus_state *mbus, struct device_node *np) { - int addr_cells, c_addr_cells, c_size_cells; - int i, ret, cell_count; - const __be32 *r, *ranges_start, *ranges_end; + int ret; + struct of_range_parser parser; + struct of_range range; - ret = mbus_parse_ranges(np, &addr_cells, &c_addr_cells, - &c_size_cells, &cell_count, - &ranges_start, &ranges_end); + ret = of_range_parser_init(&parser, np); if (ret < 0) - return ret; + return 0; - for (i = 0, r = ranges_start; r < ranges_end; r += cell_count, i++) { - u32 windowid, base, size; + for_each_of_range(&parser, &range) { + u32 windowid = upper_32_bits(range.bus_addr); u8 target, attr; /* * An 
entry with a non-zero custom field do not * correspond to a static window, so skip it. */ - windowid = of_read_number(r, 1); if (CUSTOM(windowid)) continue; target = TARGET(windowid); attr = ATTR(windowid); - base = of_read_number(r + c_addr_cells, addr_cells); - size = of_read_number(r + c_addr_cells + addr_cells, - c_size_cells); - ret = mbus_dt_setup_win(mbus, base, size, target, attr); + ret = mbus_dt_setup_win(mbus, range.cpu_addr, range.size, target, attr); if (ret < 0) return ret; } diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c index e02d0656242b..e4dfda7b3b10 100644 --- a/drivers/bus/omap-ocp2scp.c +++ b/drivers/bus/omap-ocp2scp.c @@ -84,12 +84,10 @@ err0: return ret; } -static int omap_ocp2scp_remove(struct platform_device *pdev) +static void omap_ocp2scp_remove(struct platform_device *pdev) { pm_runtime_disable(&pdev->dev); device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices); - - return 0; } #ifdef CONFIG_OF diff --git a/drivers/bus/omap_l3_smx.c b/drivers/bus/omap_l3_smx.c index bb1606f5ce2d..7f0a8f8b3f4c 100644 --- a/drivers/bus/omap_l3_smx.c +++ b/drivers/bus/omap_l3_smx.c @@ -15,7 +15,6 @@ #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_device.h> #include "omap_l3_smx.h" @@ -166,19 +165,10 @@ static irqreturn_t omap3_l3_app_irq(int irq, void *_l3) irqreturn_t ret = IRQ_NONE; int_type = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR; - if (!int_type) { + if (!int_type) status = omap3_l3_readll(l3->rt, L3_SI_FLAG_STATUS_0); - /* - * if we have a timeout error, there's nothing we can - * do besides rebooting the board. So let's BUG on any - * of such errors and handle the others. timeout error - * is severe and not expected to occur. - */ - BUG_ON(status & L3_STATUS_0_TIMEOUT_MASK); - } else { + else status = omap3_l3_readll(l3->rt, L3_SI_FLAG_STATUS_1); - /* No timeout error for debug sources */ - } /* identify the error source */ err_source = __ffs(status); @@ -190,6 +180,14 @@ static irqreturn_t omap3_l3_app_irq(int irq, void *_l3) ret |= omap3_l3_block_irq(l3, error, error_addr); } + /* + * if we have a timeout error, there's nothing we can + * do besides rebooting the board. So let's BUG on any + * of such errors and handle the others. timeout error + * is severe and not expected to occur. 
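Back in the mvebu-mbus conversion above, the hand-rolled mbus_parse_ranges() disappears because the OF core already knows how to walk "ranges": of_range_parser_init() plus for_each_of_range() hand back each tuple pre-decoded, so none of the #address-cells/#size-cells arithmetic survives. The consumer side in isolation, a sketch assuming np is the bus node:

        #include <linux/of_address.h>

        struct of_range_parser parser;
        struct of_range range;

        if (of_range_parser_init(&parser, np) < 0)
                return 0;       /* no "ranges" property: nothing to do */

        for_each_of_range(&parser, &range)
                pr_debug("bus %llx -> cpu %llx (%llx bytes)\n",
                         range.bus_addr, range.cpu_addr, range.size);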
+ */ + BUG_ON(!int_type && status & L3_STATUS_0_TIMEOUT_MASK); + /* Clear the status register */ clear = (L3_AGENT_STATUS_CLEAR_IA << int_type) | L3_AGENT_STATUS_CLEAR_TA; @@ -263,7 +261,7 @@ err0: return ret; } -static int omap3_l3_remove(struct platform_device *pdev) +static void omap3_l3_remove(struct platform_device *pdev) { struct omap3_l3 *l3 = platform_get_drvdata(pdev); @@ -271,8 +269,6 @@ static int omap3_l3_remove(struct platform_device *pdev) free_irq(l3->debug_irq, l3); iounmap(l3->rt); kfree(l3); - - return 0; } static struct platform_driver omap3_l3_driver = { diff --git a/drivers/bus/qcom-ebi2.c b/drivers/bus/qcom-ebi2.c index 663c82749222..c1fef1b4bd89 100644 --- a/drivers/bus/qcom-ebi2.c +++ b/drivers/bus/qcom-ebi2.c @@ -403,4 +403,3 @@ static struct platform_driver qcom_ebi2_driver = { module_platform_driver(qcom_ebi2_driver); MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>"); MODULE_DESCRIPTION("Qualcomm EBI2 driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/bus/qcom-ssc-block-bus.c b/drivers/bus/qcom-ssc-block-bus.c index eedeb29a5ff3..7f5fd4e0940d 100644 --- a/drivers/bus/qcom-ssc-block-bus.c +++ b/drivers/bus/qcom-ssc-block-bus.c @@ -264,18 +264,6 @@ static int qcom_ssc_block_bus_probe(struct platform_device *pdev) platform_set_drvdata(pdev, data); - data->pd_names = qcom_ssc_block_pd_names; - data->num_pds = ARRAY_SIZE(qcom_ssc_block_pd_names); - - /* power domains */ - ret = qcom_ssc_block_bus_pds_attach(&pdev->dev, data->pds, data->pd_names, data->num_pds); - if (ret < 0) - return dev_err_probe(&pdev->dev, ret, "error when attaching power domains\n"); - - ret = qcom_ssc_block_bus_pds_enable(data->pds, data->num_pds); - if (ret < 0) - return dev_err_probe(&pdev->dev, ret, "error when enabling power domains\n"); - /* low level overrides for when the HW logic doesn't "just work" */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpm_sscaon_config0"); data->reg_mpm_sscaon_config0 = devm_ioremap_resource(&pdev->dev, res); @@ -343,28 +331,42 @@ static int qcom_ssc_block_bus_probe(struct platform_device *pdev) data->ssc_axi_halt = halt_args.args[0]; + /* power domains */ + data->pd_names = qcom_ssc_block_pd_names; + data->num_pds = ARRAY_SIZE(qcom_ssc_block_pd_names); + + ret = qcom_ssc_block_bus_pds_attach(&pdev->dev, data->pds, data->pd_names, data->num_pds); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, "error when attaching power domains\n"); + + ret = qcom_ssc_block_bus_pds_enable(data->pds, data->num_pds); + if (ret < 0) { + dev_err_probe(&pdev->dev, ret, "error when enabling power domains\n"); + goto err_detach_pds_bus; + } + qcom_ssc_block_bus_init(&pdev->dev); of_platform_populate(np, NULL, NULL, &pdev->dev); return 0; + +err_detach_pds_bus: + qcom_ssc_block_bus_pds_detach(&pdev->dev, data->pds, data->num_pds); + + return ret; } -static int qcom_ssc_block_bus_remove(struct platform_device *pdev) +static void qcom_ssc_block_bus_remove(struct platform_device *pdev) { struct qcom_ssc_block_bus_data *data = platform_get_drvdata(pdev); qcom_ssc_block_bus_deinit(&pdev->dev); - iounmap(data->reg_mpm_sscaon_config0); - iounmap(data->reg_mpm_sscaon_config1); - qcom_ssc_block_bus_pds_disable(data->pds, data->num_pds); qcom_ssc_block_bus_pds_detach(&pdev->dev, data->pds, data->num_pds); pm_runtime_disable(&pdev->dev); pm_clk_destroy(&pdev->dev); - - return 0; } static const struct of_device_id qcom_ssc_block_bus_of_match[] = { @@ -386,4 +388,3 @@ module_platform_driver(qcom_ssc_block_bus_driver); MODULE_DESCRIPTION("A driver for handling the 
init sequence needed for accessing the SSC block on (some) qcom SoCs over AHB"); MODULE_AUTHOR("Michael Srba <Michael.Srba@seznam.cz>"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c index 6b8d6257ed8a..d8e029e7e53f 100644 --- a/drivers/bus/simple-pm-bus.c +++ b/drivers/bus/simple-pm-bus.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Simple Power-Managed Bus Driver * @@ -8,17 +9,26 @@ * for more details. */ +#include <linux/clk.h> #include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +struct simple_pm_bus { + struct clk_bulk_data *clks; + int num_clks; +}; + static int simple_pm_bus_probe(struct platform_device *pdev) { const struct device *dev = &pdev->dev; const struct of_dev_auxdata *lookup = dev_get_platdata(dev); struct device_node *np = dev->of_node; const struct of_device_id *match; + struct simple_pm_bus *bus; /* * Allow user to use driver_override to bind this driver to a @@ -44,6 +54,16 @@ static int simple_pm_bus_probe(struct platform_device *pdev) return -ENODEV; } + bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL); + if (!bus) + return -ENOMEM; + + bus->num_clks = devm_clk_bulk_get_all(&pdev->dev, &bus->clks); + if (bus->num_clks < 0) + return dev_err_probe(&pdev->dev, bus->num_clks, "failed to get clocks\n"); + + dev_set_drvdata(&pdev->dev, bus); + dev_dbg(&pdev->dev, "%s\n", __func__); pm_runtime_enable(&pdev->dev); @@ -54,19 +74,66 @@ static int simple_pm_bus_probe(struct platform_device *pdev) return 0; } -static int simple_pm_bus_remove(struct platform_device *pdev) +static void simple_pm_bus_remove(struct platform_device *pdev) { const void *data = of_device_get_match_data(&pdev->dev); if (pdev->driver_override || data) - return 0; + return; dev_dbg(&pdev->dev, "%s\n", __func__); pm_runtime_disable(&pdev->dev); +} + +static int simple_pm_bus_runtime_suspend(struct device *dev) +{ + struct simple_pm_bus *bus = dev_get_drvdata(dev); + + clk_bulk_disable_unprepare(bus->num_clks, bus->clks); + + return 0; +} + +static int simple_pm_bus_runtime_resume(struct device *dev) +{ + struct simple_pm_bus *bus = dev_get_drvdata(dev); + int ret; + + ret = clk_bulk_prepare_enable(bus->num_clks, bus->clks); + if (ret) { + dev_err(dev, "failed to enable clocks: %d\n", ret); + return ret; + } + return 0; } +static int simple_pm_bus_suspend(struct device *dev) +{ + struct simple_pm_bus *bus = dev_get_drvdata(dev); + + if (!bus) + return 0; + + return pm_runtime_force_suspend(dev); +} + +static int simple_pm_bus_resume(struct device *dev) +{ + struct simple_pm_bus *bus = dev_get_drvdata(dev); + + if (!bus) + return 0; + + return pm_runtime_force_resume(dev); +} + +static const struct dev_pm_ops simple_pm_bus_pm_ops = { + RUNTIME_PM_OPS(simple_pm_bus_runtime_suspend, simple_pm_bus_runtime_resume, NULL) + NOIRQ_SYSTEM_SLEEP_PM_OPS(simple_pm_bus_suspend, simple_pm_bus_resume) +}; + #define ONLY_BUS ((void *) 1) /* Match if the device is only a bus. 
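Two things are worth spelling out in the simple-pm-bus changes above: the clock handling is entirely generic (devm_clk_bulk_get_all() grabs whatever the node's "clocks" property lists), and the system-sleep hooks tolerate a NULL drvdata because, when the device matched only as a transparent bus, probe returns before setting it. The runtime-PM half reduced to a toy driver, foo_* names invented:

        #include <linux/clk.h>
        #include <linux/platform_device.h>
        #include <linux/pm_runtime.h>

        struct foo {
                struct clk_bulk_data *clks;
                int num_clks;
        };

        static int foo_runtime_suspend(struct device *dev)
        {
                struct foo *priv = dev_get_drvdata(dev);

                clk_bulk_disable_unprepare(priv->num_clks, priv->clks);
                return 0;
        }

        static int foo_runtime_resume(struct device *dev)
        {
                struct foo *priv = dev_get_drvdata(dev);

                /* prepare+enable every clock from devm_clk_bulk_get_all() */
                return clk_bulk_prepare_enable(priv->num_clks, priv->clks);
        }

System sleep then simply reuses this path through pm_runtime_force_suspend() and pm_runtime_force_resume(), which is what the NOIRQ_SYSTEM_SLEEP_PM_OPS() wiring above selects.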
*/ static const struct of_device_id simple_pm_bus_of_match[] = { @@ -85,6 +152,7 @@ static struct platform_driver simple_pm_bus_driver = { .driver = { .name = "simple-pm-bus", .of_match_table = simple_pm_bus_of_match, + .pm = pm_ptr(&simple_pm_bus_pm_ops), }, }; @@ -92,4 +160,3 @@ module_platform_driver(simple_pm_bus_driver); MODULE_DESCRIPTION("Simple Power-Managed Bus Driver"); MODULE_AUTHOR("Geert Uytterhoeven <geert+renesas@glider.be>"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/bus/stm32_etzpc.c b/drivers/bus/stm32_etzpc.c new file mode 100644 index 000000000000..7fc0f16960be --- /dev/null +++ b/drivers/bus/stm32_etzpc.c @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023, STMicroelectronics - All Rights Reserved + */ + +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/types.h> + +#include "stm32_firewall.h" + +/* + * ETZPC registers + */ +#define ETZPC_DECPROT 0x10 +#define ETZPC_HWCFGR 0x3F0 + +/* + * HWCFGR register + */ +#define ETZPC_HWCFGR_NUM_TZMA GENMASK(7, 0) +#define ETZPC_HWCFGR_NUM_PER_SEC GENMASK(15, 8) +#define ETZPC_HWCFGR_NUM_AHB_SEC GENMASK(23, 16) +#define ETZPC_HWCFGR_CHUNKS1N4 GENMASK(31, 24) + +/* + * ETZPC miscellaneous + */ +#define ETZPC_PROT_MASK GENMASK(1, 0) +#define ETZPC_PROT_A7NS 0x3 +#define ETZPC_DECPROT_SHIFT 1 + +#define IDS_PER_DECPROT_REGS 16 + +static int stm32_etzpc_grant_access(struct stm32_firewall_controller *ctrl, u32 firewall_id) +{ + u32 offset, reg_offset, sec_val; + + if (firewall_id >= ctrl->max_entries) { + dev_err(ctrl->dev, "Invalid sys bus ID %u", firewall_id); + return -EINVAL; + } + + /* Check access configuration, 16 peripherals per register */ + reg_offset = ETZPC_DECPROT + 0x4 * (firewall_id / IDS_PER_DECPROT_REGS); + offset = (firewall_id % IDS_PER_DECPROT_REGS) << ETZPC_DECPROT_SHIFT; + + /* Verify peripheral is non-secure and attributed to cortex A7 */ + sec_val = (readl(ctrl->mmio + reg_offset) >> offset) & ETZPC_PROT_MASK; + if (sec_val != ETZPC_PROT_A7NS) { + dev_dbg(ctrl->dev, "Invalid bus configuration: reg_offset %#x, value %d\n", + reg_offset, sec_val); + return -EACCES; + } + + return 0; +} + +static void stm32_etzpc_release_access(struct stm32_firewall_controller *ctrl __maybe_unused, + u32 firewall_id __maybe_unused) +{ +} + +static int stm32_etzpc_probe(struct platform_device *pdev) +{ + struct stm32_firewall_controller *etzpc_controller; + struct device_node *np = pdev->dev.of_node; + u32 nb_per, nb_master; + struct resource *res; + void __iomem *mmio; + int rc; + + etzpc_controller = devm_kzalloc(&pdev->dev, sizeof(*etzpc_controller), GFP_KERNEL); + if (!etzpc_controller) + return -ENOMEM; + + mmio = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(mmio)) + return PTR_ERR(mmio); + + etzpc_controller->dev = &pdev->dev; + etzpc_controller->mmio = mmio; + etzpc_controller->name = dev_driver_string(etzpc_controller->dev); + etzpc_controller->type = STM32_PERIPHERAL_FIREWALL | STM32_MEMORY_FIREWALL; + etzpc_controller->grant_access = stm32_etzpc_grant_access; + etzpc_controller->release_access = stm32_etzpc_release_access; + + /* Get number of etzpc entries*/ + nb_per = FIELD_GET(ETZPC_HWCFGR_NUM_PER_SEC, + readl(etzpc_controller->mmio + ETZPC_HWCFGR)); + nb_master = 
FIELD_GET(ETZPC_HWCFGR_NUM_AHB_SEC, + readl(etzpc_controller->mmio + ETZPC_HWCFGR)); + etzpc_controller->max_entries = nb_per + nb_master; + + platform_set_drvdata(pdev, etzpc_controller); + + rc = stm32_firewall_controller_register(etzpc_controller); + if (rc) { + dev_err(etzpc_controller->dev, "Couldn't register as a firewall controller: %d", + rc); + return rc; + } + + rc = stm32_firewall_populate_bus(etzpc_controller); + if (rc) { + dev_err(etzpc_controller->dev, "Couldn't populate ETZPC bus: %d", + rc); + return rc; + } + + /* Populate all allowed nodes */ + return of_platform_populate(np, NULL, NULL, &pdev->dev); +} + +static const struct of_device_id stm32_etzpc_of_match[] = { + { .compatible = "st,stm32-etzpc" }, + {} +}; +MODULE_DEVICE_TABLE(of, stm32_etzpc_of_match); + +static struct platform_driver stm32_etzpc_driver = { + .probe = stm32_etzpc_probe, + .driver = { + .name = "stm32-etzpc", + .of_match_table = stm32_etzpc_of_match, + }, +}; +module_platform_driver(stm32_etzpc_driver); + +MODULE_AUTHOR("Gatien Chevallier <gatien.chevallier@foss.st.com>"); +MODULE_DESCRIPTION("STMicroelectronics ETZPC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/bus/stm32_firewall.c b/drivers/bus/stm32_firewall.c new file mode 100644 index 000000000000..2fc9761dadec --- /dev/null +++ b/drivers/bus/stm32_firewall.c @@ -0,0 +1,294 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023, STMicroelectronics - All Rights Reserved + */ + +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/bus/stm32_firewall_device.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <linux/slab.h> + +#include "stm32_firewall.h" + +/* Corresponds to STM32_FIREWALL_MAX_EXTRA_ARGS + firewall ID */ +#define STM32_FIREWALL_MAX_ARGS (STM32_FIREWALL_MAX_EXTRA_ARGS + 1) + +static LIST_HEAD(firewall_controller_list); +static DEFINE_MUTEX(firewall_controller_list_lock); + +/* Firewall device API */ + +int stm32_firewall_get_firewall(struct device_node *np, struct stm32_firewall *firewall, + unsigned int nb_firewall) +{ + struct stm32_firewall_controller *ctrl; + struct of_phandle_iterator it; + unsigned int i, j = 0; + int err; + + if (!firewall || !nb_firewall) + return -EINVAL; + + /* Parse property with phandle parsed out */ + of_for_each_phandle(&it, err, np, "access-controllers", "#access-controller-cells", 0) { + struct of_phandle_args provider_args; + struct device_node *provider = it.node; + const char *fw_entry; + bool match = false; + + if (err) { + pr_err("Unable to get access-controllers property for node %s\n, err: %d", + np->full_name, err); + of_node_put(provider); + return err; + } + + if (j >= nb_firewall) { + pr_err("Too many firewall controllers"); + of_node_put(provider); + return -EINVAL; + } + + provider_args.args_count = of_phandle_iterator_args(&it, provider_args.args, + STM32_FIREWALL_MAX_ARGS); + + /* Check if the parsed phandle corresponds to a registered firewall controller */ + mutex_lock(&firewall_controller_list_lock); + list_for_each_entry(ctrl, &firewall_controller_list, entry) { + if (ctrl->dev->of_node->phandle == it.phandle) { + match = true; + firewall[j].firewall_ctrl = ctrl; + break; + } + } + mutex_unlock(&firewall_controller_list_lock); + + if (!match) { + firewall[j].firewall_ctrl = NULL; + pr_err("No firewall controller 
registered for %s\n", np->full_name); + of_node_put(provider); + return -ENODEV; + } + + err = of_property_read_string_index(np, "access-controller-names", j, &fw_entry); + if (err == 0) + firewall[j].entry = fw_entry; + + /* Handle the case when there are no arguments given along with the phandle */ + if (provider_args.args_count < 0 || + provider_args.args_count > STM32_FIREWALL_MAX_ARGS) { + of_node_put(provider); + return -EINVAL; + } else if (provider_args.args_count == 0) { + firewall[j].extra_args_size = 0; + firewall[j].firewall_id = U32_MAX; + j++; + continue; + } + + /* The firewall ID is always the first argument */ + firewall[j].firewall_id = provider_args.args[0]; + + /* Extra args start at the second argument */ + for (i = 0; i < provider_args.args_count - 1; i++) + firewall[j].extra_args[i] = provider_args.args[i + 1]; + + /* Remove the firewall ID arg that is not an extra argument */ + firewall[j].extra_args_size = provider_args.args_count - 1; + + j++; + } + + return 0; +} +EXPORT_SYMBOL_GPL(stm32_firewall_get_firewall); + +int stm32_firewall_grant_access(struct stm32_firewall *firewall) +{ + struct stm32_firewall_controller *firewall_controller; + + if (!firewall || firewall->firewall_id == U32_MAX) + return -EINVAL; + + firewall_controller = firewall->firewall_ctrl; + + if (!firewall_controller) + return -ENODEV; + + return firewall_controller->grant_access(firewall_controller, firewall->firewall_id); +} +EXPORT_SYMBOL_GPL(stm32_firewall_grant_access); + +int stm32_firewall_grant_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id) +{ + struct stm32_firewall_controller *firewall_controller; + + if (!firewall || subsystem_id == U32_MAX || firewall->firewall_id == U32_MAX) + return -EINVAL; + + firewall_controller = firewall->firewall_ctrl; + + if (!firewall_controller) + return -ENODEV; + + return firewall_controller->grant_access(firewall_controller, subsystem_id); +} +EXPORT_SYMBOL_GPL(stm32_firewall_grant_access_by_id); + +void stm32_firewall_release_access(struct stm32_firewall *firewall) +{ + struct stm32_firewall_controller *firewall_controller; + + if (!firewall || firewall->firewall_id == U32_MAX) { + pr_debug("Incorrect arguments when releasing a firewall access\n"); + return; + } + + firewall_controller = firewall->firewall_ctrl; + + if (!firewall_controller) { + pr_debug("No firewall controller to release\n"); + return; + } + + firewall_controller->release_access(firewall_controller, firewall->firewall_id); +} +EXPORT_SYMBOL_GPL(stm32_firewall_release_access); + +void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id) +{ + struct stm32_firewall_controller *firewall_controller; + + if (!firewall || subsystem_id == U32_MAX || firewall->firewall_id == U32_MAX) { + pr_debug("Incorrect arguments when releasing a firewall access"); + return; + } + + firewall_controller = firewall->firewall_ctrl; + + if (!firewall_controller) { + pr_debug("No firewall controller to release"); + return; + } + + firewall_controller->release_access(firewall_controller, subsystem_id); +} +EXPORT_SYMBOL_GPL(stm32_firewall_release_access_by_id); + +/* Firewall controller API */ + +int stm32_firewall_controller_register(struct stm32_firewall_controller *firewall_controller) +{ + struct stm32_firewall_controller *ctrl; + + if (!firewall_controller) + return -ENODEV; + + pr_info("Registering %s firewall controller\n", firewall_controller->name); + + mutex_lock(&firewall_controller_list_lock); + list_for_each_entry(ctrl, &firewall_controller_list, 
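For reference, the parsing loop in stm32_firewall_get_firewall() above is the generic OF phandle-with-args pattern; nothing in it is firewall-specific. Stripped down to a sketch, keeping the property names from this binding:

        #include <linux/of.h>

        struct of_phandle_iterator it;
        int err;

        of_for_each_phandle(&it, err, np, "access-controllers",
                            "#access-controller-cells", 0) {
                u32 args[8];
                int count = of_phandle_iterator_args(&it, args, ARRAY_SIZE(args));

                /* it.phandle names the provider; args[0] is the firewall ID,
                 * args[1..count-1] are the provider-specific extra cells */
        }

The framework then resolves it.phandle against its registered-controller list, as stm32_firewall_get_firewall() does above.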
entry) { + if (ctrl == firewall_controller) { + pr_debug("%s firewall controller already registered\n", + firewall_controller->name); + mutex_unlock(&firewall_controller_list_lock); + return 0; + } + } + list_add_tail(&firewall_controller->entry, &firewall_controller_list); + mutex_unlock(&firewall_controller_list_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(stm32_firewall_controller_register); + +void stm32_firewall_controller_unregister(struct stm32_firewall_controller *firewall_controller) +{ + struct stm32_firewall_controller *ctrl; + bool controller_removed = false; + + if (!firewall_controller) { + pr_debug("Null reference while unregistering firewall controller\n"); + return; + } + + mutex_lock(&firewall_controller_list_lock); + list_for_each_entry(ctrl, &firewall_controller_list, entry) { + if (ctrl == firewall_controller) { + controller_removed = true; + list_del_init(&ctrl->entry); + break; + } + } + mutex_unlock(&firewall_controller_list_lock); + + if (!controller_removed) + pr_debug("There was no firewall controller named %s to unregister\n", + firewall_controller->name); +} +EXPORT_SYMBOL_GPL(stm32_firewall_controller_unregister); + +int stm32_firewall_populate_bus(struct stm32_firewall_controller *firewall_controller) +{ + struct stm32_firewall *firewalls; + struct device_node *child; + struct device *parent; + unsigned int i; + int len; + int err; + + parent = firewall_controller->dev; + + dev_dbg(parent, "Populating %s system bus\n", dev_name(firewall_controller->dev)); + + for_each_available_child_of_node(dev_of_node(parent), child) { + /* The access-controllers property is mandatory for firewall bus devices */ + len = of_count_phandle_with_args(child, "access-controllers", + "#access-controller-cells"); + if (len <= 0) { + of_node_put(child); + return -EINVAL; + } + + firewalls = kcalloc(len, sizeof(*firewalls), GFP_KERNEL); + if (!firewalls) { + of_node_put(child); + return -ENOMEM; + } + + err = stm32_firewall_get_firewall(child, firewalls, (unsigned int)len); + if (err) { + kfree(firewalls); + of_node_put(child); + return err; + } + + for (i = 0; i < len; i++) { + if (firewall_controller->grant_access(firewall_controller, + firewalls[i].firewall_id)) { + /* + * Peripheral access not allowed or not defined. 
+ * Mark the node as populated so platform bus won't probe it + */ + of_detach_node(child); + dev_err(parent, "%s: Device driver will not be probed\n", + child->full_name); + } + } + + kfree(firewalls); + } + + return 0; +} +EXPORT_SYMBOL_GPL(stm32_firewall_populate_bus); diff --git a/drivers/bus/stm32_firewall.h b/drivers/bus/stm32_firewall.h new file mode 100644 index 000000000000..e5fac85fe346 --- /dev/null +++ b/drivers/bus/stm32_firewall.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2023, STMicroelectronics - All Rights Reserved + */ + +#ifndef _STM32_FIREWALL_H +#define _STM32_FIREWALL_H + +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/types.h> + +/** + * STM32_PERIPHERAL_FIREWALL: This type of firewall protects peripherals + * STM32_MEMORY_FIREWALL: This type of firewall protects memories/subsets of memory + * zones + * STM32_NOTYPE_FIREWALL: Undefined firewall type + */ + +#define STM32_PERIPHERAL_FIREWALL BIT(1) +#define STM32_MEMORY_FIREWALL BIT(2) +#define STM32_NOTYPE_FIREWALL BIT(3) + +/** + * struct stm32_firewall_controller - Information on firewall controller supplying services + * + * @name: Name of the firewall controller + * @dev: Device reference of the firewall controller + * @mmio: Base address of the firewall controller + * @entry: List entry of the firewall controller list + * @type: Type of firewall + * @max_entries: Number of entries covered by the firewall + * @grant_access: Callback used to grant access for a device access against a + * firewall controller + * @release_access: Callback used to release resources taken by a device when access was + * granted + * @grant_memory_range_access: Callback used to grant access for a device to a given memory region + */ +struct stm32_firewall_controller { + const char *name; + struct device *dev; + void __iomem *mmio; + struct list_head entry; + unsigned int type; + unsigned int max_entries; + + int (*grant_access)(struct stm32_firewall_controller *ctrl, u32 id); + void (*release_access)(struct stm32_firewall_controller *ctrl, u32 id); + int (*grant_memory_range_access)(struct stm32_firewall_controller *ctrl, phys_addr_t paddr, + size_t size); +}; + +/** + * stm32_firewall_controller_register - Register a firewall controller to the STM32 firewall + * framework + * @firewall_controller: Firewall controller to register + * + * Returns 0 in case of success or -ENODEV if no controller was given. + */ +int stm32_firewall_controller_register(struct stm32_firewall_controller *firewall_controller); + +/** + * stm32_firewall_controller_unregister - Unregister a firewall controller from the STM32 + * firewall framework + * @firewall_controller: Firewall controller to unregister + */ +void stm32_firewall_controller_unregister(struct stm32_firewall_controller *firewall_controller); + +/** + * stm32_firewall_populate_bus - Populate device tree nodes that have a correct firewall + * configuration. This is used at boot-time only, as a sanity check + * between device tree and firewalls hardware configurations to + * prevent a kernel crash when a device driver is not granted access + * + * @firewall_controller: Firewall controller which nodes will be populated or not + * + * Returns 0 in case of success or appropriate errno code if error occurred. 
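From a client driver's point of view the framework boils down to three calls from <linux/bus/stm32_firewall_device.h>. A minimal consumer sketch; foo_* is a made-up peripheral driver and error handling is trimmed:

        #include <linux/bus/stm32_firewall_device.h>
        #include <linux/platform_device.h>

        static int foo_probe(struct platform_device *pdev)
        {
                struct stm32_firewall fw;
                int err;

                /* parse one "access-controllers" reference from the node */
                err = stm32_firewall_get_firewall(pdev->dev.of_node, &fw, 1);
                if (err)
                        return err;

                err = stm32_firewall_grant_access(&fw);
                if (err)
                        return dev_err_probe(&pdev->dev, err,
                                             "peripheral not reachable\n");

                /* ... use the hardware; a real driver would keep the access
                 * and call stm32_firewall_release_access(&fw) on teardown */
                return 0;
        }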
+ */ +int stm32_firewall_populate_bus(struct stm32_firewall_controller *firewall_controller); + +#endif /* _STM32_FIREWALL_H */ diff --git a/drivers/bus/stm32_rifsc.c b/drivers/bus/stm32_rifsc.c new file mode 100644 index 000000000000..debeaf8ea1bd --- /dev/null +++ b/drivers/bus/stm32_rifsc.c @@ -0,0 +1,841 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023, STMicroelectronics - All Rights Reserved + */ + +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/types.h> + +#include "stm32_firewall.h" + +/* + * RIFSC offset register + */ +#define RIFSC_RISC_SECCFGR0 0x10 +#define RIFSC_RISC_PRIVCFGR0 0x30 +#define RIFSC_RISC_PER0_CIDCFGR 0x100 +#define RIFSC_RISC_PER0_SEMCR 0x104 +#define RIFSC_RISC_REG0_ACFGR 0x900 +#define RIFSC_RISC_REG3_AADDR 0x924 +#define RIFSC_RISC_HWCFGR2 0xFEC + +/* + * SEMCR register + */ +#define SEMCR_MUTEX BIT(0) + +/* + * HWCFGR2 register + */ +#define HWCFGR2_CONF1_MASK GENMASK(15, 0) +#define HWCFGR2_CONF2_MASK GENMASK(23, 16) +#define HWCFGR2_CONF3_MASK GENMASK(31, 24) + +/* + * RIFSC miscellaneous + */ +#define RIFSC_RISC_CFEN_MASK BIT(0) +#define RIFSC_RISC_SEM_EN_MASK BIT(1) +#define RIFSC_RISC_SCID_MASK GENMASK(6, 4) +#define RIFSC_RISC_SEML_SHIFT 16 +#define RIFSC_RISC_SEMWL_MASK GENMASK(23, 16) +#define RIFSC_RISC_PER_ID_MASK GENMASK(31, 24) + +#define RIFSC_RISC_PERx_CID_MASK (RIFSC_RISC_CFEN_MASK | \ + RIFSC_RISC_SEM_EN_MASK | \ + RIFSC_RISC_SCID_MASK | \ + RIFSC_RISC_SEMWL_MASK) + +#define IDS_PER_RISC_SEC_PRIV_REGS 32 + +/* RIF miscellaneous */ +/* + * CIDCFGR register fields + */ +#define CIDCFGR_CFEN BIT(0) +#define CIDCFGR_SEMEN BIT(1) +#define CIDCFGR_SEMWL(x) BIT(RIFSC_RISC_SEML_SHIFT + (x)) + +#define SEMWL_SHIFT 16 + +/* Compartiment IDs */ +#define RIF_CID0 0x0 +#define RIF_CID1 0x1 + +#if defined(CONFIG_DEBUG_FS) +#define RIFSC_RISUP_ENTRIES 128 +#define RIFSC_RIMU_ENTRIES 16 +#define RIFSC_RISAL_SUBREGIONS 2 +#define RIFSC_RISAL_GRANULARITY 8 + +#define RIFSC_RIMC_ATTR0 0xC10 + +#define RIFSC_RIMC_CIDSEL BIT(2) +#define RIFSC_RIMC_MCID_MASK GENMASK(6, 4) +#define RIFSC_RIMC_MSEC BIT(8) +#define RIFSC_RIMC_MPRIV BIT(9) + +#define RIFSC_RISC_SRCID_MASK GENMASK(6, 4) +#define RIFSC_RISC_SRPRIV BIT(9) +#define RIFSC_RISC_SRSEC BIT(8) +#define RIFSC_RISC_SRRLOCK BIT(1) +#define RIFSC_RISC_SREN BIT(0) +#define RIFSC_RISC_SRLENGTH_MASK GENMASK(27, 16) +#define RIFSC_RISC_SRSTART_MASK GENMASK(10, 0) + +static const char *stm32mp21_rifsc_rimu_names[RIFSC_RIMU_ENTRIES] = { + "ETR", + "SDMMC1", + "SDMMC2", + "SDMMC3", + "OTG_HS", + "USBH", + "ETH1", + "ETH2", + "RESERVED", + "RESERVED", + "DCMIPP", + "LTDC_L1/L2", + "LTDC_L3", + "RESERVED", + "RESERVED", + "RESERVED", +}; + +static const char *stm32mp25_rifsc_rimu_names[RIFSC_RIMU_ENTRIES] = { + "ETR", + "SDMMC1", + "SDMMC2", + "SDMMC3", + "USB3DR", + "USBH", + "ETH1", + "ETH2", + "PCIE", + "GPU", + "DMCIPP", + "LTDC_L0/L1", + "LTDC_L2", + "LTDC_ROT", + "VDEC", + "VENC" +}; + +static const char *stm32mp21_rifsc_risup_names[RIFSC_RISUP_ENTRIES] = { + "TIM1", + "TIM2", + "TIM3", + "TIM4", + "TIM5", + "TIM6", + "TIM7", + "TIM8", + "TIM10", + "TIM11", + "TIM12", + "TIM13", + "TIM14", + "TIM15", + "TIM16", + "TIM17", + "RESERVED", + "LPTIM1", + "LPTIM2", + "LPTIM3", + "LPTIM4", + "LPTIM5", + "SPI1", + 
"SPI2", + "SPI3", + "SPI4", + "SPI5", + "SPI6", + "RESERVED", + "RESERVED", + "SPDIFRX", + "USART1", + "USART2", + "USART3", + "UART4", + "UART5", + "USART6", + "UART7", + "RESERVED", + "RESERVED", + "LPUART1", + "I2C1", + "I2C2", + "I2C3", + "RESERVED", + "RESERVED", + "RESERVED", + "RESERVED", + "RESERVED", + "SAI1", + "SAI2", + "SAI3", + "SAI4", + "RESERVED", + "MDF1", + "RESERVED", + "FDCAN", + "HDP", + "ADC1", + "ADC2", + "ETH1", + "ETH2", + "RESERVED", + "USBH", + "RESERVED", + "RESERVED", + "OTG_HS", + "DDRPERFM", + "RESERVED", + "RESERVED", + "RESERVED", + "RESERVED", + "RESERVED", + "STGEN", + "OCTOSPI1", + "RESERVED", + "SDMMC1", + "SDMMC2", + "SDMMC3", + "RESERVED", + "LTDC_CMN", + "RESERVED", + "RESERVED", + "RESERVED", + "RESERVED", + "RESERVED", + "CSI", + "DCMIPP", + "DCMI_PSSI", + "RESERVED", + "RESERVED", + "RESERVED", + "RNG1", + "RNG2", + "PKA", + "SAES", + "HASH1", + "HASH2", + "CRYP1", + "CRYP2", + "IWDG1", + "IWDG2", + "IWDG3", + "IWDG4", + "WWDG1", + "RESERVED", + "VREFBUF", + "DTS", + "RAMCFG", + "CRC", + "SERC", + "RESERVED", + "RESERVED", + "RESERVED", + "I3C1", + "I3C2", + "I3C3", + "RESERVED", + "ICACHE_DCACHE", + "LTDC_L1L2", + "LTDC_L3", + "RESERVED", + "RESERVED", + "RESERVED", + "RESERVED", + "OTFDEC1", + "RESERVED", + "IAC", +}; + +static const char *stm32mp25_rifsc_risup_names[RIFSC_RISUP_ENTRIES] = { + "TIM1", + "TIM2", + "TIM3", + "TIM4", + "TIM5", + "TIM6", + "TIM7", + "TIM8", + "TIM10", + "TIM11", + "TIM12", + "TIM13", + "TIM14", + "TIM15", + "TIM16", + "TIM17", + "TIM20", + "LPTIM1", + "LPTIM2", + "LPTIM3", + "LPTIM4", + "LPTIM5", + "SPI1", + "SPI2", + "SPI3", + "SPI4", + "SPI5", + "SPI6", + "SPI7", + "SPI8", + "SPDIFRX", + "USART1", + "USART2", + "USART3", + "UART4", + "UART5", + "USART6", + "UART7", + "UART8", + "UART9", + "LPUART1", + "I2C1", + "I2C2", + "I2C3", + "I2C4", + "I2C5", + "I2C6", + "I2C7", + "I2C8", + "SAI1", + "SAI2", + "SAI3", + "SAI4", + "RESERVED", + "MDF1", + "ADF1", + "FDCAN", + "HDP", + "ADC12", + "ADC3", + "ETH1", + "ETH2", + "RESERVED", + "USBH", + "RESERVED", + "RESERVED", + "USB3DR", + "COMBOPHY", + "PCIE", + "UCPD1", + "ETHSW_DEIP", + "ETHSW_ACM_CF", + "ETHSW_ACM_MSGBU", + "STGEN", + "OCTOSPI1", + "OCTOSPI2", + "SDMMC1", + "SDMMC2", + "SDMMC3", + "GPU", + "LTDC_CMN", + "DSI_CMN", + "RESERVED", + "RESERVED", + "LVDS", + "RESERVED", + "CSI", + "DCMIPP", + "DCMI_PSSI", + "VDEC", + "VENC", + "RESERVED", + "RNG", + "PKA", + "SAES", + "HASH", + "CRYP1", + "CRYP2", + "IWDG1", + "IWDG2", + "IWDG3", + "IWDG4", + "IWDG5", + "WWDG1", + "WWDG2", + "RESERVED", + "VREFBUF", + "DTS", + "RAMCFG", + "CRC", + "SERC", + "OCTOSPIM", + "GICV2M", + "RESERVED", + "I3C1", + "I3C2", + "I3C3", + "I3C4", + "ICACHE_DCACHE", + "LTDC_L0L1", + "LTDC_L2", + "LTDC_ROT", + "DSI_TRIG", + "DSI_RDFIFO", + "RESERVED", + "OTFDEC1", + "OTFDEC2", + "IAC", +}; +struct rifsc_risup_debug_data { + char dev_name[15]; + u8 dev_cid; + u8 dev_sem_cids; + u8 dev_id; + bool dev_cid_filt_en; + bool dev_sem_en; + bool dev_priv; + bool dev_sec; +}; + +struct rifsc_rimu_debug_data { + char m_name[11]; + u8 m_cid; + bool cidsel; + bool m_sec; + bool m_priv; +}; + +struct rifsc_subreg_debug_data { + bool sr_sec; + bool sr_priv; + u8 sr_cid; + bool sr_rlock; + bool sr_enable; + u16 sr_start; + u16 sr_length; +}; + +struct stm32_rifsc_resources_names { + const char **device_names; + const char **initiator_names; +}; +struct rifsc_dbg_private { + const struct stm32_rifsc_resources_names *res_names; + void __iomem *mmio; + unsigned int nb_risup; + unsigned int nb_rimu; + unsigned int 
nb_risal; +}; + +static const struct stm32_rifsc_resources_names rifsc_mp21_res_names = { + .device_names = stm32mp21_rifsc_risup_names, + .initiator_names = stm32mp21_rifsc_rimu_names, +}; + +static const struct stm32_rifsc_resources_names rifsc_mp25_res_names = { + .device_names = stm32mp25_rifsc_risup_names, + .initiator_names = stm32mp25_rifsc_rimu_names, +}; + +static void stm32_rifsc_fill_rimu_dbg_entry(struct rifsc_dbg_private *rifsc, + struct rifsc_rimu_debug_data *dbg_entry, int i) +{ + const struct stm32_rifsc_resources_names *dbg_names = rifsc->res_names; + u32 rimc_attr = readl_relaxed(rifsc->mmio + RIFSC_RIMC_ATTR0 + 0x4 * i); + + snprintf(dbg_entry->m_name, sizeof(dbg_entry->m_name), "%s", dbg_names->initiator_names[i]); + dbg_entry->m_cid = FIELD_GET(RIFSC_RIMC_MCID_MASK, rimc_attr); + dbg_entry->cidsel = rimc_attr & RIFSC_RIMC_CIDSEL; + dbg_entry->m_sec = rimc_attr & RIFSC_RIMC_MSEC; + dbg_entry->m_priv = rimc_attr & RIFSC_RIMC_MPRIV; +} + +static void stm32_rifsc_fill_dev_dbg_entry(struct rifsc_dbg_private *rifsc, + struct rifsc_risup_debug_data *dbg_entry, int i) +{ + const struct stm32_rifsc_resources_names *dbg_names = rifsc->res_names; + u32 cid_cfgr, sec_cfgr, priv_cfgr; + u8 reg_id = i / IDS_PER_RISC_SEC_PRIV_REGS; + u8 reg_offset = i % IDS_PER_RISC_SEC_PRIV_REGS; + + cid_cfgr = readl_relaxed(rifsc->mmio + RIFSC_RISC_PER0_CIDCFGR + 0x8 * i); + sec_cfgr = readl_relaxed(rifsc->mmio + RIFSC_RISC_SECCFGR0 + 0x4 * reg_id); + priv_cfgr = readl_relaxed(rifsc->mmio + RIFSC_RISC_PRIVCFGR0 + 0x4 * reg_id); + + snprintf(dbg_entry->dev_name, sizeof(dbg_entry->dev_name), "%s", + dbg_names->device_names[i]); + dbg_entry->dev_id = i; + dbg_entry->dev_cid_filt_en = cid_cfgr & CIDCFGR_CFEN; + dbg_entry->dev_sem_en = cid_cfgr & CIDCFGR_SEMEN; + dbg_entry->dev_cid = FIELD_GET(RIFSC_RISC_SCID_MASK, cid_cfgr); + dbg_entry->dev_sem_cids = FIELD_GET(RIFSC_RISC_SEMWL_MASK, cid_cfgr); + dbg_entry->dev_sec = sec_cfgr & BIT(reg_offset) ? true : false; + dbg_entry->dev_priv = priv_cfgr & BIT(reg_offset) ? 
true : false; +} + + +static void stm32_rifsc_fill_subreg_dbg_entry(struct rifsc_dbg_private *rifsc, + struct rifsc_subreg_debug_data *dbg_entry, int i, + int j) +{ + u32 risc_xcfgr = readl_relaxed(rifsc->mmio + RIFSC_RISC_REG0_ACFGR + 0x10 * i + 0x8 * j); + u32 risc_xaddr; + + dbg_entry->sr_sec = risc_xcfgr & RIFSC_RISC_SRSEC; + dbg_entry->sr_priv = risc_xcfgr & RIFSC_RISC_SRPRIV; + dbg_entry->sr_cid = FIELD_GET(RIFSC_RISC_SRCID_MASK, risc_xcfgr); + dbg_entry->sr_rlock = risc_xcfgr & RIFSC_RISC_SRRLOCK; + dbg_entry->sr_enable = risc_xcfgr & RIFSC_RISC_SREN; + if (i == 2) { + risc_xaddr = readl_relaxed(rifsc->mmio + RIFSC_RISC_REG3_AADDR + 0x8 * j); + dbg_entry->sr_length = FIELD_GET(RIFSC_RISC_SRLENGTH_MASK, risc_xaddr); + dbg_entry->sr_start = FIELD_GET(RIFSC_RISC_SRSTART_MASK, risc_xaddr); + } else { + dbg_entry->sr_start = 0; + dbg_entry->sr_length = U16_MAX; + } +} + +static int stm32_rifsc_conf_dump_show(struct seq_file *s, void *data) +{ + struct rifsc_dbg_private *rifsc = (struct rifsc_dbg_private *)s->private; + int i, j; + + seq_puts(s, "\n=============================================\n"); + seq_puts(s, " RIFSC dump\n"); + seq_puts(s, "=============================================\n\n"); + + seq_puts(s, "\n=============================================\n"); + seq_puts(s, " RISUP dump\n"); + seq_puts(s, "=============================================\n"); + + seq_printf(s, "\n| %-15s |", "Peripheral name"); + seq_puts(s, "| Firewall ID |"); + seq_puts(s, "| N/SECURE |"); + seq_puts(s, "| N/PRIVILEGED |"); + seq_puts(s, "| CID filtering |"); + seq_puts(s, "| Semaphore mode |"); + seq_puts(s, "| SCID |"); + seq_printf(s, "| %7s |\n", "SEMWL"); + + for (i = 0; i < RIFSC_RISUP_ENTRIES && i < rifsc->nb_risup; i++) { + struct rifsc_risup_debug_data d_dbg_entry; + + stm32_rifsc_fill_dev_dbg_entry(rifsc, &d_dbg_entry, i); + + seq_printf(s, "| %-15s |", d_dbg_entry.dev_name); + seq_printf(s, "| %-11d |", d_dbg_entry.dev_id); + seq_printf(s, "| %-8s |", d_dbg_entry.dev_sec ? "SEC" : "NSEC"); + seq_printf(s, "| %-12s |", d_dbg_entry.dev_priv ? "PRIV" : "NPRIV"); + seq_printf(s, "| %-13s |", str_enabled_disabled(d_dbg_entry.dev_cid_filt_en)); + seq_printf(s, "| %-14s |", str_enabled_disabled(d_dbg_entry.dev_sem_en)); + seq_printf(s, "| %-4d |", d_dbg_entry.dev_cid); + seq_printf(s, "| %#-7x |\n", d_dbg_entry.dev_sem_cids); + } + + seq_puts(s, "\n=============================================\n"); + seq_puts(s, " RIMU dump\n"); + seq_puts(s, "=============================================\n"); + + seq_puts(s, "| RIMU's name |"); + seq_puts(s, "| CIDSEL |"); + seq_puts(s, "| MCID |"); + seq_puts(s, "| N/SECURE |"); + seq_puts(s, "| N/PRIVILEGED |\n"); + + for (i = 0; i < RIFSC_RIMU_ENTRIES && rifsc->nb_rimu; i++) { + struct rifsc_rimu_debug_data m_dbg_entry; + + stm32_rifsc_fill_rimu_dbg_entry(rifsc, &m_dbg_entry, i); + + seq_printf(s, "| %-11s |", m_dbg_entry.m_name); + seq_printf(s, "| %-6s |", m_dbg_entry.cidsel ? "CIDSEL" : ""); + seq_printf(s, "| %-4d |", m_dbg_entry.m_cid); + seq_printf(s, "| %-8s |", m_dbg_entry.m_sec ? "SEC" : "NSEC"); + seq_printf(s, "| %-12s |\n", m_dbg_entry.m_priv ? "PRIV" : "NPRIV"); + } + + if (rifsc->nb_risal > 0) { + seq_puts(s, "\n=============================================\n"); + seq_puts(s, " RISAL dump\n"); + seq_puts(s, "=============================================\n"); + + seq_puts(s, "| Memory |"); + seq_puts(s, "| Subreg. |"); + seq_puts(s, "| N/SECURE |"); + seq_puts(s, "| N/PRIVILEGED |"); + seq_puts(s, "| Subreg. 
CID |"); + seq_puts(s, "| Resource lock |"); + seq_puts(s, "| Subreg. enable |"); + seq_puts(s, "| Subreg. start |"); + seq_puts(s, "| Subreg. end |\n"); + + for (i = 0; i < rifsc->nb_risal; i++) { + for (j = 0; j < RIFSC_RISAL_SUBREGIONS; j++) { + struct rifsc_subreg_debug_data sr_dbg_entry; + + stm32_rifsc_fill_subreg_dbg_entry(rifsc, &sr_dbg_entry, i, j); + + seq_printf(s, "| LPSRAM%1d |", i + 1); + seq_printf(s, "| %1s |", (j == 0) ? "A" : "B"); + seq_printf(s, "| %-8s |", sr_dbg_entry.sr_sec ? "SEC" : "NSEC"); + seq_printf(s, "| %-12s |", sr_dbg_entry.sr_priv ? "PRIV" : "NPRIV"); + seq_printf(s, "| 0x%-9x |", sr_dbg_entry.sr_cid); + seq_printf(s, "| %-13s |", + sr_dbg_entry.sr_rlock ? "locked (1)" : "unlocked (0)"); + seq_printf(s, "| %-14s |", + str_enabled_disabled(sr_dbg_entry.sr_enable)); + seq_printf(s, "| 0x%-11x |", sr_dbg_entry.sr_start); + seq_printf(s, "| 0x%-11x |\n", sr_dbg_entry.sr_start + + sr_dbg_entry.sr_length - 1); + } + } + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(stm32_rifsc_conf_dump); + +static int stm32_rifsc_register_debugfs(struct stm32_firewall_controller *rifsc_controller, + u32 nb_risup, u32 nb_rimu, u32 nb_risal) +{ + struct rifsc_dbg_private *rifsc_priv; + struct dentry *root = NULL; + + rifsc_priv = devm_kzalloc(rifsc_controller->dev, sizeof(*rifsc_priv), GFP_KERNEL); + if (!rifsc_priv) + return -ENOMEM; + + rifsc_priv->mmio = rifsc_controller->mmio; + rifsc_priv->nb_risup = nb_risup; + rifsc_priv->nb_rimu = nb_rimu; + rifsc_priv->nb_risal = nb_risal; + rifsc_priv->res_names = of_device_get_match_data(rifsc_controller->dev); + + root = debugfs_lookup("stm32_firewall", NULL); + if (!root) + root = debugfs_create_dir("stm32_firewall", NULL); + + if (IS_ERR(root)) + return PTR_ERR(root); + + debugfs_create_file("rifsc", 0444, root, rifsc_priv, &stm32_rifsc_conf_dump_fops); + + return 0; +} +#endif /* defined(CONFIG_DEBUG_FS) */ + +static bool stm32_rifsc_is_semaphore_available(void __iomem *addr) +{ + return !(readl(addr) & SEMCR_MUTEX); +} + +static int stm32_rif_acquire_semaphore(struct stm32_firewall_controller *stm32_firewall_controller, + int id) +{ + void __iomem *addr = stm32_firewall_controller->mmio + RIFSC_RISC_PER0_SEMCR + 0x8 * id; + + writel(SEMCR_MUTEX, addr); + + /* Check that CID1 has the semaphore */ + if (stm32_rifsc_is_semaphore_available(addr) || + FIELD_GET(RIFSC_RISC_SCID_MASK, readl(addr)) != RIF_CID1) + return -EACCES; + + return 0; +} + +static void stm32_rif_release_semaphore(struct stm32_firewall_controller *stm32_firewall_controller, + int id) +{ + void __iomem *addr = stm32_firewall_controller->mmio + RIFSC_RISC_PER0_SEMCR + 0x8 * id; + + if (stm32_rifsc_is_semaphore_available(addr)) + return; + + writel(SEMCR_MUTEX, addr); + + /* Ok if another compartment takes the semaphore before the check */ + WARN_ON(!stm32_rifsc_is_semaphore_available(addr) && + FIELD_GET(RIFSC_RISC_SCID_MASK, readl(addr)) == RIF_CID1); +} + +static int stm32_rifsc_grant_access(struct stm32_firewall_controller *ctrl, u32 firewall_id) +{ + struct stm32_firewall_controller *rifsc_controller = ctrl; + u32 reg_offset, reg_id, sec_reg_value, cid_reg_value; + int rc; + + if (firewall_id >= rifsc_controller->max_entries) { + dev_err(rifsc_controller->dev, "Invalid sys bus ID %u", firewall_id); + return -EINVAL; + } + + /* + * RIFSC_RISC_PRIVCFGRx and RIFSC_RISC_SECCFGRx both handle configuration access for + * 32 peripherals. 
On the other hand, there is one _RIFSC_RISC_PERx_CIDCFGR register + * per peripheral + */ + reg_id = firewall_id / IDS_PER_RISC_SEC_PRIV_REGS; + reg_offset = firewall_id % IDS_PER_RISC_SEC_PRIV_REGS; + sec_reg_value = readl(rifsc_controller->mmio + RIFSC_RISC_SECCFGR0 + 0x4 * reg_id); + cid_reg_value = readl(rifsc_controller->mmio + RIFSC_RISC_PER0_CIDCFGR + 0x8 * firewall_id); + + /* First check conditions for semaphore mode, which doesn't take into account static CID. */ + if ((cid_reg_value & CIDCFGR_SEMEN) && (cid_reg_value & CIDCFGR_CFEN)) { + if (cid_reg_value & BIT(RIF_CID1 + SEMWL_SHIFT)) { + /* Static CID is irrelevant if semaphore mode */ + goto skip_cid_check; + } else { + dev_dbg(rifsc_controller->dev, + "Invalid bus semaphore configuration: index %d\n", firewall_id); + return -EACCES; + } + } + + /* + * Skip CID check if CID filtering isn't enabled or filtering is enabled on CID0, which + * matches any CID. + */ + if (!(cid_reg_value & CIDCFGR_CFEN) || + FIELD_GET(RIFSC_RISC_SCID_MASK, cid_reg_value) == RIF_CID0) + goto skip_cid_check; + + /* Coherency check with the CID configuration */ + if (FIELD_GET(RIFSC_RISC_SCID_MASK, cid_reg_value) != RIF_CID1) { + dev_dbg(rifsc_controller->dev, "Invalid CID configuration for peripheral: %d\n", + firewall_id); + return -EACCES; + } + +skip_cid_check: + /* Check security configuration */ + if (sec_reg_value & BIT(reg_offset)) { + dev_dbg(rifsc_controller->dev, + "Invalid security configuration for peripheral: %d\n", firewall_id); + return -EACCES; + } + + /* + * If the peripheral is in semaphore mode, take the semaphore so that + * the CID1 has the ownership. + */ + if ((cid_reg_value & CIDCFGR_SEMEN) && (cid_reg_value & CIDCFGR_CFEN)) { + rc = stm32_rif_acquire_semaphore(rifsc_controller, firewall_id); + if (rc) { + dev_err(rifsc_controller->dev, + "Couldn't acquire semaphore for peripheral: %d\n", firewall_id); + return rc; + } + } + + return 0; +} + +static void stm32_rifsc_release_access(struct stm32_firewall_controller *ctrl, u32 firewall_id) +{ + stm32_rif_release_semaphore(ctrl, firewall_id); +} + +static int stm32_rifsc_probe(struct platform_device *pdev) +{ + struct stm32_firewall_controller *rifsc_controller; + struct device_node *np = pdev->dev.of_node; + u32 nb_risup, nb_rimu, nb_risal; + struct resource *res; + void __iomem *mmio; + int rc; + + rifsc_controller = devm_kzalloc(&pdev->dev, sizeof(*rifsc_controller), GFP_KERNEL); + if (!rifsc_controller) + return -ENOMEM; + + mmio = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(mmio)) + return PTR_ERR(mmio); + + rifsc_controller->dev = &pdev->dev; + rifsc_controller->mmio = mmio; + rifsc_controller->name = dev_driver_string(rifsc_controller->dev); + rifsc_controller->type = STM32_PERIPHERAL_FIREWALL | STM32_MEMORY_FIREWALL; + rifsc_controller->grant_access = stm32_rifsc_grant_access; + rifsc_controller->release_access = stm32_rifsc_release_access; + + /* Get number of RIFSC entries */ + nb_risup = FIELD_GET(HWCFGR2_CONF1_MASK, + readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2)); + nb_rimu = FIELD_GET(HWCFGR2_CONF2_MASK, + readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2)); + nb_risal = FIELD_GET(HWCFGR2_CONF3_MASK, + readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2)); + /* + * On STM32MP21, RIFSC_RISC_HWCFGR2 shows an incorrect number of RISAL (NUM_RISAL is 3 + * instead of 0). A software workaround is implemented using the st,mem-map property in the + * device tree. This property is absent or left empty if there is no RISAL. 
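+ * In this driver that simply means forcing nb_risal to 0 for the MP21 compatible below.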
+ */ + if (of_device_is_compatible(np, "st,stm32mp21-rifsc")) + nb_risal = 0; + rifsc_controller->max_entries = nb_risup + nb_rimu + nb_risal; + + platform_set_drvdata(pdev, rifsc_controller); + + rc = stm32_firewall_controller_register(rifsc_controller); + if (rc) { + dev_err(rifsc_controller->dev, "Couldn't register as a firewall controller: %d", + rc); + return rc; + } + + rc = stm32_firewall_populate_bus(rifsc_controller); + if (rc) { + dev_err(rifsc_controller->dev, "Couldn't populate RIFSC bus: %d", + rc); + return rc; + } + +#if defined(CONFIG_DEBUG_FS) + rc = stm32_rifsc_register_debugfs(rifsc_controller, nb_risup, nb_rimu, nb_risal); + if (rc) + return dev_err_probe(rifsc_controller->dev, rc, "Failed creating debugfs entry\n"); +#endif + + /* Populate all allowed nodes */ + return of_platform_populate(np, NULL, NULL, &pdev->dev); +} + +static const struct of_device_id stm32_rifsc_of_match[] = { + { + .compatible = "st,stm32mp25-rifsc", +#if defined(CONFIG_DEBUG_FS) + .data = &rifsc_mp25_res_names, +#endif + }, + { + .compatible = "st,stm32mp21-rifsc", +#if defined(CONFIG_DEBUG_FS) + .data = &rifsc_mp21_res_names, +#endif + }, + {} +}; +MODULE_DEVICE_TABLE(of, stm32_rifsc_of_match); + +static struct platform_driver stm32_rifsc_driver = { + .probe = stm32_rifsc_probe, + .driver = { + .name = "stm32-rifsc", + .of_match_table = stm32_rifsc_of_match, + }, +}; +module_platform_driver(stm32_rifsc_driver); + +MODULE_AUTHOR("Gatien Chevallier <gatien.chevallier@foss.st.com>"); +MODULE_DESCRIPTION("STMicroelectronics RIFSC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/bus/sun50i-de2.c b/drivers/bus/sun50i-de2.c index 414f29cdedf0..dfe588179aca 100644 --- a/drivers/bus/sun50i-de2.c +++ b/drivers/bus/sun50i-de2.c @@ -24,10 +24,9 @@ static int sun50i_de2_bus_probe(struct platform_device *pdev) return 0; } -static int sun50i_de2_bus_remove(struct platform_device *pdev) +static void sun50i_de2_bus_remove(struct platform_device *pdev) { sunxi_sram_release(&pdev->dev); - return 0; } static const struct of_device_id sun50i_de2_bus_of_match[] = { diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index 4cd2e127946e..82735c58be11 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c @@ -39,7 +39,7 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/of_irq.h> -#include <linux/of_platform.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/pm_runtime.h> @@ -128,9 +128,9 @@ struct sunxi_rsb { }; /* bus / slave device related functions */ -static struct bus_type sunxi_rsb_bus; +static const struct bus_type sunxi_rsb_bus; -static int sunxi_rsb_device_match(struct device *dev, struct device_driver *drv) +static int sunxi_rsb_device_match(struct device *dev, const struct device_driver *drv) { return of_driver_match_device(dev, drv); } @@ -172,12 +172,17 @@ static void sunxi_rsb_device_remove(struct device *dev) drv->remove(to_sunxi_rsb_device(dev)); } -static struct bus_type sunxi_rsb_bus = { +static int sunxi_rsb_device_modalias(const struct device *dev, struct kobj_uevent_env *env) +{ + return of_device_uevent_modalias(dev, env); +} + +static const struct bus_type sunxi_rsb_bus = { .name = RSB_CTRL_NAME, .match = sunxi_rsb_device_match, .probe = sunxi_rsb_device_probe, .remove = sunxi_rsb_device_remove, - .uevent = of_device_uevent_modalias, + .uevent = sunxi_rsb_device_modalias, }; static void sunxi_rsb_dev_release(struct device *dev) @@ -267,6 +272,9 @@ EXPORT_SYMBOL_GPL(sunxi_rsb_driver_register); /* 
common code that starts a transfer */ static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb) { + u32 int_mask, status; + bool timeout; + if (readl(rsb->regs + RSB_CTRL) & RSB_CTRL_START_TRANS) { dev_dbg(rsb->dev, "RSB transfer still in progress\n"); return -EBUSY; @@ -274,13 +282,23 @@ static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb) reinit_completion(&rsb->complete); - writel(RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER, - rsb->regs + RSB_INTE); + int_mask = RSB_INTS_LOAD_BSY | RSB_INTS_TRANS_ERR | RSB_INTS_TRANS_OVER; + writel(int_mask, rsb->regs + RSB_INTE); writel(RSB_CTRL_START_TRANS | RSB_CTRL_GLOBAL_INT_ENB, rsb->regs + RSB_CTRL); - if (!wait_for_completion_io_timeout(&rsb->complete, - msecs_to_jiffies(100))) { + if (irqs_disabled()) { + timeout = readl_poll_timeout_atomic(rsb->regs + RSB_INTS, + status, (status & int_mask), + 10, 100000); + writel(status, rsb->regs + RSB_INTS); + } else { + timeout = !wait_for_completion_io_timeout(&rsb->complete, + msecs_to_jiffies(100)); + status = rsb->status; + } + + if (timeout) { dev_dbg(rsb->dev, "RSB timeout\n"); /* abort the transfer */ @@ -292,18 +310,18 @@ static int _sunxi_rsb_run_xfer(struct sunxi_rsb *rsb) return -ETIMEDOUT; } - if (rsb->status & RSB_INTS_LOAD_BSY) { + if (status & RSB_INTS_LOAD_BSY) { dev_dbg(rsb->dev, "RSB busy\n"); return -EBUSY; } - if (rsb->status & RSB_INTS_TRANS_ERR) { - if (rsb->status & RSB_INTS_TRANS_ERR_ACK) { + if (status & RSB_INTS_TRANS_ERR) { + if (status & RSB_INTS_TRANS_ERR_ACK) { dev_dbg(rsb->dev, "RSB slave nack\n"); return -EINVAL; } - if (rsb->status & RSB_INTS_TRANS_ERR_DATA) { + if (status & RSB_INTS_TRANS_ERR_DATA) { dev_dbg(rsb->dev, "RSB transfer data error\n"); return -EIO; } @@ -355,7 +373,6 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr, unlock: mutex_unlock(&rsb->lock); - pm_runtime_mark_last_busy(rsb->dev); pm_runtime_put_autosuspend(rsb->dev); return ret; @@ -399,7 +416,6 @@ static int sunxi_rsb_write(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr, mutex_unlock(&rsb->lock); - pm_runtime_mark_last_busy(rsb->dev); pm_runtime_put_autosuspend(rsb->dev); return ret; @@ -439,7 +455,7 @@ static void regmap_sunxi_rsb_free_ctx(void *context) kfree(ctx); } -static struct regmap_bus regmap_sunxi_rsb = { +static const struct regmap_bus regmap_sunxi_rsb = { .reg_write = regmap_sunxi_rsb_reg_write, .reg_read = regmap_sunxi_rsb_reg_read, .free_context = regmap_sunxi_rsb_free_ctx, @@ -728,18 +744,15 @@ static int sunxi_rsb_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; - struct resource *r; struct sunxi_rsb *rsb; u32 clk_freq = 3000000; int irq, ret; of_property_read_u32(np, "clock-frequency", &clk_freq); - if (clk_freq > RSB_MAX_FREQ) { - dev_err(dev, - "clock-frequency (%u Hz) is too high (max = 20MHz)\n", - clk_freq); - return -EINVAL; - } + if (clk_freq > RSB_MAX_FREQ) + return dev_err_probe(dev, -EINVAL, + "clock-frequency (%u Hz) is too high (max = 20MHz)\n", + clk_freq); rsb = devm_kzalloc(dev, sizeof(*rsb), GFP_KERNEL); if (!rsb) @@ -748,8 +761,7 @@ static int sunxi_rsb_probe(struct platform_device *pdev) rsb->dev = dev; rsb->clk_freq = clk_freq; platform_set_drvdata(pdev, rsb); - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - rsb->regs = devm_ioremap_resource(dev, r); + rsb->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(rsb->regs)) return PTR_ERR(rsb->regs); @@ -758,28 +770,22 @@ static int sunxi_rsb_probe(struct platform_device *pdev) return irq; rsb->clk = 
devm_clk_get(dev, NULL); - if (IS_ERR(rsb->clk)) { - ret = PTR_ERR(rsb->clk); - dev_err(dev, "failed to retrieve clk: %d\n", ret); - return ret; - } + if (IS_ERR(rsb->clk)) + return dev_err_probe(dev, PTR_ERR(rsb->clk), + "failed to retrieve clk\n"); rsb->rstc = devm_reset_control_get(dev, NULL); - if (IS_ERR(rsb->rstc)) { - ret = PTR_ERR(rsb->rstc); - dev_err(dev, "failed to retrieve reset controller: %d\n", ret); - return ret; - } + if (IS_ERR(rsb->rstc)) + return dev_err_probe(dev, PTR_ERR(rsb->rstc), + "failed to retrieve reset controller\n"); init_completion(&rsb->complete); mutex_init(&rsb->lock); ret = devm_request_irq(dev, irq, sunxi_rsb_irq, 0, RSB_CTRL_NAME, rsb); - if (ret) { - dev_err(dev, "can't register interrupt handler irq %d: %d\n", - irq, ret); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, + "can't register interrupt handler irq %d\n", irq); ret = sunxi_rsb_hw_init(rsb); if (ret) @@ -801,23 +807,13 @@ static int sunxi_rsb_probe(struct platform_device *pdev) return 0; } -static int sunxi_rsb_remove(struct platform_device *pdev) +static void sunxi_rsb_remove(struct platform_device *pdev) { struct sunxi_rsb *rsb = platform_get_drvdata(pdev); device_for_each_child(rsb->dev, NULL, sunxi_rsb_remove_devices); pm_runtime_disable(&pdev->dev); sunxi_rsb_hw_exit(rsb); - - return 0; -} - -static void sunxi_rsb_shutdown(struct platform_device *pdev) -{ - struct sunxi_rsb *rsb = platform_get_drvdata(pdev); - - pm_runtime_disable(&pdev->dev); - sunxi_rsb_hw_exit(rsb); } static const struct dev_pm_ops sunxi_rsb_dev_pm_ops = { @@ -834,8 +830,7 @@ MODULE_DEVICE_TABLE(of, sunxi_rsb_of_match_table); static struct platform_driver sunxi_rsb_driver = { .probe = sunxi_rsb_probe, - .remove = sunxi_rsb_remove, - .shutdown = sunxi_rsb_shutdown, + .remove = sunxi_rsb_remove, .driver = { .name = RSB_CTRL_NAME, .of_match_table = sunxi_rsb_of_match_table, @@ -853,7 +848,13 @@ static int __init sunxi_rsb_init(void) return ret; } - return platform_driver_register(&sunxi_rsb_driver); + ret = platform_driver_register(&sunxi_rsb_driver); + if (ret) { + bus_unregister(&sunxi_rsb_bus); + return ret; + } + + return 0; } module_init(sunxi_rsb_init); diff --git a/drivers/bus/tegra-aconnect.c b/drivers/bus/tegra-aconnect.c index ac58142301f4..90e3b0a10816 100644 --- a/drivers/bus/tegra-aconnect.c +++ b/drivers/bus/tegra-aconnect.c @@ -53,11 +53,9 @@ static int tegra_aconnect_probe(struct platform_device *pdev) return 0; } -static int tegra_aconnect_remove(struct platform_device *pdev) +static void tegra_aconnect_remove(struct platform_device *pdev) { pm_runtime_disable(&pdev->dev); - - return 0; } static int tegra_aconnect_runtime_resume(struct device *dev) diff --git a/drivers/bus/tegra-gmi.c b/drivers/bus/tegra-gmi.c index 662266719682..9c09141961d8 100644 --- a/drivers/bus/tegra-gmi.c +++ b/drivers/bus/tegra-gmi.c @@ -9,7 +9,9 @@ #include <linux/delay.h> #include <linux/io.h> #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> @@ -209,7 +211,6 @@ static int tegra_gmi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct tegra_gmi *gmi; - struct resource *res; int err; gmi = devm_kzalloc(dev, sizeof(*gmi), GFP_KERNEL); @@ -219,8 +220,7 @@ static int tegra_gmi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, gmi); gmi->dev = dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - gmi->base = 
devm_ioremap_resource(dev, res); + gmi->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(gmi->base)) return PTR_ERR(gmi->base); @@ -258,14 +258,12 @@ static int tegra_gmi_probe(struct platform_device *pdev) return 0; } -static int tegra_gmi_remove(struct platform_device *pdev) +static void tegra_gmi_remove(struct platform_device *pdev) { struct tegra_gmi *gmi = platform_get_drvdata(pdev); of_platform_depopulate(gmi->dev); tegra_gmi_disable(gmi); - - return 0; } static int __maybe_unused tegra_gmi_runtime_resume(struct device *dev) diff --git a/drivers/bus/ti-pwmss.c b/drivers/bus/ti-pwmss.c index e9c26c94251b..1f2cab91e438 100644 --- a/drivers/bus/ti-pwmss.c +++ b/drivers/bus/ti-pwmss.c @@ -10,7 +10,7 @@ #include <linux/io.h> #include <linux/err.h> #include <linux/pm_runtime.h> -#include <linux/of_device.h> +#include <linux/of_platform.h> static const struct of_device_id pwmss_of_match[] = { { .compatible = "ti,am33xx-pwmss" }, @@ -33,10 +33,9 @@ static int pwmss_probe(struct platform_device *pdev) return ret; } -static int pwmss_remove(struct platform_device *pdev) +static void pwmss_remove(struct platform_device *pdev) { pm_runtime_disable(&pdev->dev); - return 0; } static struct platform_driver pwmss_driver = { diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 9a7d12332fad..610354ce7f8f 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -1,6 +1,17 @@ // SPDX-License-Identifier: GPL-2.0 /* * ti-sysc.c - Texas Instruments sysc interconnect target driver + * + * TI SoCs have an interconnect target wrapper IP for many devices. The wrapper + * IP manages clock gating, resets, and PM capabilities for the connected devices. + * + * Copyright (C) 2017-2024 Texas Instruments Incorporated - https://www.ti.com/ + * + * Many features are based on the earlier omap_hwmod arch code with thanks to all + * the people who developed and debugged the code over the years: + * + * Copyright (C) 2009-2011 Nokia Corporation + * Copyright (C) 2011-2021 Texas Instruments Incorporated - https://www.ti.com/ */ #include <linux/io.h> @@ -37,7 +48,9 @@ enum sysc_soc { SOC_UNKNOWN, SOC_2420, SOC_2430, + SOC_AM33, SOC_3430, + SOC_AM35, SOC_3630, SOC_4430, SOC_4460, @@ -109,11 +122,11 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = { * @cookie: data used by legacy platform callbacks * @name: name if available * @revision: interconnect target module revision + * @sysconfig: saved sysconfig register value * @reserved: target module is reserved and already in use * @enabled: sysc runtime enabled status * @needs_resume: runtime resume needed on resume from suspend * @child_needs_resume: runtime resume needed for child on resume from suspend - * @disable_on_idle: status flag used for disabling modules with resets * @idle_work: work structure used to perform delayed idle on a module * @pre_reset_quirk: module specific pre-reset quirk * @post_reset_quirk: module specific post-reset quirk @@ -648,91 +661,23 @@ static int sysc_init_resets(struct sysc *ddata) static int sysc_parse_and_check_child_range(struct sysc *ddata) { struct device_node *np = ddata->dev->of_node; - const __be32 *ranges; - u32 nr_addr, nr_size; - int len, error; - - ranges = of_get_property(np, "ranges", &len); - if (!ranges) { - dev_err(ddata->dev, "missing ranges for %pOF\n", np); - - return -ENOENT; - } - - len /= sizeof(*ranges); - - if (len < 3) { - dev_err(ddata->dev, "incomplete ranges for %pOF\n", np); - - return -EINVAL; - } - - error = of_property_read_u32(np, "#address-cells", &nr_addr); - if 
(error) - return -ENOENT; + struct of_range_parser parser; + struct of_range range; + int error; - error = of_property_read_u32(np, "#size-cells", &nr_size); + error = of_range_parser_init(&parser, np); if (error) - return -ENOENT; - - if (nr_addr != 1 || nr_size != 1) { - dev_err(ddata->dev, "invalid ranges for %pOF\n", np); + return error; - return -EINVAL; + for_each_of_range(&parser, &range) { + ddata->module_pa = range.cpu_addr; + ddata->module_size = range.size; + break; } - ranges++; - ddata->module_pa = of_translate_address(np, ranges++); - ddata->module_size = be32_to_cpup(ranges); - return 0; } -/* Interconnect instances to probe before l4_per instances */ -static struct resource early_bus_ranges[] = { - /* am3/4 l4_wkup */ - { .start = 0x44c00000, .end = 0x44c00000 + 0x300000, }, - /* omap4/5 and dra7 l4_cfg */ - { .start = 0x4a000000, .end = 0x4a000000 + 0x300000, }, - /* omap4 l4_wkup */ - { .start = 0x4a300000, .end = 0x4a300000 + 0x30000, }, - /* omap5 and dra7 l4_wkup without dra7 dcan segment */ - { .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000, }, -}; - -static atomic_t sysc_defer = ATOMIC_INIT(10); - -/** - * sysc_defer_non_critical - defer non_critical interconnect probing - * @ddata: device driver data - * - * We want to probe l4_cfg and l4_wkup interconnect instances before any - * l4_per instances as l4_per instances depend on resources on l4_cfg and - * l4_wkup interconnects. - */ -static int sysc_defer_non_critical(struct sysc *ddata) -{ - struct resource *res; - int i; - - if (!atomic_read(&sysc_defer)) - return 0; - - for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) { - res = &early_bus_ranges[i]; - if (ddata->module_pa >= res->start && - ddata->module_pa <= res->end) { - atomic_set(&sysc_defer, 0); - - return 0; - } - } - - atomic_dec_if_positive(&sysc_defer); - - return -EPROBE_DEFER; -} - static struct device_node *stdout_path; static void sysc_init_stdout_path(struct sysc *ddata) @@ -913,7 +858,7 @@ static int sysc_check_registers(struct sysc *ddata) * within the interconnect target module range. For example, SGX has * them at offset 0x1fc00 in the 32MB module address space. And cpsw * has them at offset 0x1200 in the CPSW_WR child. Usually the - * the interconnect target module registers are at the beginning of + * interconnect target module registers are at the beginning of * the module range though. 
*/ static int sysc_ioremap(struct sysc *ddata) @@ -958,13 +903,9 @@ static int sysc_map_and_check_registers(struct sysc *ddata) if (error) return error; - error = sysc_defer_non_critical(ddata); - if (error) - return error; - sysc_check_children(ddata); - if (!of_get_property(np, "reg", NULL)) + if (!of_property_present(np, "reg")) return 0; error = sysc_parse_registers(ddata); @@ -1119,6 +1060,11 @@ static int sysc_enable_module(struct device *dev) if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_SIDLE_ACT)) { best_mode = SYSC_IDLE_NO; + + /* Clear WAKEUP */ + if (regbits->enwkup_shift >= 0 && + ddata->cfg.sysc_val & BIT(regbits->enwkup_shift)) + reg &= ~BIT(regbits->enwkup_shift); } else { best_mode = fls(ddata->cfg.sidlemodes) - 1; if (best_mode > SYSC_IDLE_MASK) { @@ -1246,6 +1192,13 @@ set_sidle: } } + if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE_ACT) { + /* Set WAKEUP */ + if (regbits->enwkup_shift >= 0 && + ddata->cfg.sysc_val & BIT(regbits->enwkup_shift)) + reg |= BIT(regbits->enwkup_shift); + } + reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift); reg |= best_mode << regbits->sidle_shift; if (regbits->autoidle_shift >= 0 && @@ -1467,8 +1420,7 @@ static int __maybe_unused sysc_noirq_suspend(struct device *dev) ddata = dev_get_drvdata(dev); - if (ddata->cfg.quirks & - (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE)) + if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE) return 0; if (!ddata->enabled) @@ -1486,8 +1438,7 @@ static int __maybe_unused sysc_noirq_resume(struct device *dev) ddata = dev_get_drvdata(dev); - if (ddata->cfg.quirks & - (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE)) + if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE) return 0; if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_RESUME) { @@ -1538,17 +1489,6 @@ struct sysc_revision_quirk { } static const struct sysc_revision_quirk sysc_revision_quirks[] = { - /* These drivers need to be fixed to not use pm_runtime_irq_safe() */ - SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff, - SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), - SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff, - SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), - /* Uarts on omap4 and later */ - SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff, - SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), - SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff, - SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), - /* Quirks that need to be set based on the module address */ SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff, SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT | @@ -1606,10 +1546,27 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), SYSC_QUIRK("sata", 0, 0xfc, 0x1100, -ENODEV, 0x5e412000, 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), + SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff, + SYSC_QUIRK_SWSUP_SIDLE_ACT), + SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff, + SYSC_QUIRK_SWSUP_SIDLE_ACT), + /* Uarts on omap4 and later */ + SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff, + SYSC_QUIRK_SWSUP_SIDLE_ACT), + SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff, + SYSC_QUIRK_SWSUP_SIDLE_ACT), + SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff, + SYSC_QUIRK_SWSUP_SIDLE_ACT), SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), 
SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY), + SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000033, + 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY | + SYSC_MODULE_QUIRK_OTG), + SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000040, + 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY | + SYSC_MODULE_QUIRK_OTG), SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050, 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY | SYSC_MODULE_QUIRK_OTG), @@ -1808,7 +1765,7 @@ static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset, if (!ddata->module_va) return -EIO; - /* DISP_CONTROL */ + /* DISP_CONTROL, shut down lcd and digit on disable if enabled */ val = sysc_read(ddata, dispc_offset + 0x40); lcd_en = val & lcd_en_mask; digit_en = val & digit_en_mask; @@ -1820,7 +1777,7 @@ static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset, else irq_mask |= BIT(2) | BIT(3); /* EVSYNC bits */ } - if (disable & (lcd_en | digit_en)) + if (disable && (lcd_en || digit_en)) sysc_write(ddata, dispc_offset + 0x40, val & ~(lcd_en_mask | digit_en_mask)); @@ -1876,7 +1833,7 @@ static void sysc_pre_reset_quirk_dss(struct sysc *ddata) dev_warn(ddata->dev, "%s: timed out %08x !+ %08x\n", __func__, val, irq_mask); - if (sysc_soc->soc == SOC_3430) { + if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35) { /* Clear DSS_SDI_CONTROL */ sysc_write(ddata, 0x44, 0); @@ -2031,6 +1988,21 @@ static void sysc_module_disable_quirk_pruss(struct sysc *ddata) sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); } +static void sysc_module_enable_quirk_pruss(struct sysc *ddata) +{ + u32 reg; + + reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]); + + /* + * Clearing the SYSC_PRUSS_STANDBY_INIT bit - Updates OCP master + * port configuration to enable memory access outside of the + * PRU-ICSS subsystem. + */ + reg &= (~SYSC_PRUSS_STANDBY_INIT); + sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); +} + static void sysc_init_module_quirks(struct sysc *ddata) { if (ddata->legacy_mode || !ddata->name) @@ -2083,8 +2055,10 @@ static void sysc_init_module_quirks(struct sysc *ddata) ddata->module_disable_quirk = sysc_reset_done_quirk_wdt; } - if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS) + if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS) { + ddata->module_enable_quirk = sysc_module_enable_quirk_pruss; ddata->module_disable_quirk = sysc_module_disable_quirk_pruss; + } } static int sysc_clockdomain_init(struct sysc *ddata) @@ -2146,8 +2120,7 @@ static int sysc_reset(struct sysc *ddata) sysc_offset = ddata->offsets[SYSC_SYSCONFIG]; if (ddata->legacy_mode || - ddata->cap->regbits->srst_shift < 0 || - ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) + ddata->cap->regbits->srst_shift < 0) return 0; sysc_mask = BIT(ddata->cap->regbits->srst_shift); @@ -2159,11 +2132,22 @@ static int sysc_reset(struct sysc *ddata) sysc_val = sysc_read_sysconfig(ddata); sysc_val |= sysc_mask; sysc_write(ddata, sysc_offset, sysc_val); - } - if (ddata->cfg.srst_udelay) - usleep_range(ddata->cfg.srst_udelay, - ddata->cfg.srst_udelay * 2); + /* + * Some devices need a delay before reading registers + * after reset. Presumably a srst_udelay is not needed + * for devices that use a rstctrl register reset. + */ + if (ddata->cfg.srst_udelay) + fsleep(ddata->cfg.srst_udelay); + + /* + * Flush posted write. 
For devices needing srst_udelay + * this should trigger an interconnect error if the + * srst_udelay value is needed but not configured. + */ + sysc_val = sysc_read_sysconfig(ddata); + } if (ddata->post_reset_quirk) ddata->post_reset_quirk(ddata); @@ -2187,9 +2171,8 @@ static int sysc_reset(struct sysc *ddata) static int sysc_init_module(struct sysc *ddata) { bool rstctrl_deasserted = false; - int error = 0; + int error = sysc_clockdomain_init(ddata); - error = sysc_clockdomain_init(ddata); if (error) return error; @@ -2230,12 +2213,14 @@ static int sysc_init_module(struct sysc *ddata) goto err_main_clocks; } - error = sysc_reset(ddata); - if (error) - dev_err(ddata->dev, "Reset failed with %d\n", error); + if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)) { + error = sysc_reset(ddata); + if (error) + dev_err(ddata->dev, "Reset failed with %d\n", error); - if (error && !ddata->legacy_mode) - sysc_disable_module(ddata->dev); + if (error && !ddata->legacy_mode) + sysc_disable_module(ddata->dev); + } err_main_clocks: if (error) @@ -2273,11 +2258,9 @@ static int sysc_init_idlemode(struct sysc *ddata, u8 *idlemodes, const char *name) { struct device_node *np = ddata->dev->of_node; - struct property *prop; - const __be32 *p; u32 val; - of_property_for_each_u32(np, name, prop, p, val) { + of_property_for_each_u32(np, name, val) { if (val >= SYSC_NR_IDLEMODES) { dev_err(ddata->dev, "invalid idlemode: %i\n", val); return -EINVAL; @@ -2390,7 +2373,7 @@ static int sysc_child_add_clocks(struct sysc *ddata, return 0; } -static struct device_type sysc_device_type = { +static const struct device_type sysc_device_type = { }; static struct sysc *sysc_child_to_parent(struct device *dev) @@ -2437,98 +2420,13 @@ static int __maybe_unused sysc_child_runtime_resume(struct device *dev) return pm_generic_runtime_resume(dev); } -#ifdef CONFIG_PM_SLEEP -static int sysc_child_suspend_noirq(struct device *dev) -{ - struct sysc *ddata; - int error; - - ddata = sysc_child_to_parent(dev); - - dev_dbg(ddata->dev, "%s %s\n", __func__, - ddata->name ? ddata->name : ""); - - error = pm_generic_suspend_noirq(dev); - if (error) { - dev_err(dev, "%s error at %i: %i\n", - __func__, __LINE__, error); - - return error; - } - - if (!pm_runtime_status_suspended(dev)) { - error = pm_generic_runtime_suspend(dev); - if (error) { - dev_dbg(dev, "%s busy at %i: %i\n", - __func__, __LINE__, error); - - return 0; - } - - error = sysc_runtime_suspend(ddata->dev); - if (error) { - dev_err(dev, "%s error at %i: %i\n", - __func__, __LINE__, error); - - return error; - } - - ddata->child_needs_resume = true; - } - - return 0; -} - -static int sysc_child_resume_noirq(struct device *dev) -{ - struct sysc *ddata; - int error; - - ddata = sysc_child_to_parent(dev); - - dev_dbg(ddata->dev, "%s %s\n", __func__, - ddata->name ? 
ddata->name : ""); - - if (ddata->child_needs_resume) { - ddata->child_needs_resume = false; - - error = sysc_runtime_resume(ddata->dev); - if (error) - dev_err(ddata->dev, - "%s runtime resume error: %i\n", - __func__, error); - - error = pm_generic_runtime_resume(dev); - if (error) - dev_err(ddata->dev, - "%s generic runtime resume: %i\n", - __func__, error); - } - - return pm_generic_resume_noirq(dev); -} -#endif - -static struct dev_pm_domain sysc_child_pm_domain = { - .ops = { - SET_RUNTIME_PM_OPS(sysc_child_runtime_suspend, - sysc_child_runtime_resume, - NULL) - USE_PLATFORM_PM_SLEEP_OPS - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_child_suspend_noirq, - sysc_child_resume_noirq) - } -}; - /* Caller needs to take list_lock if ever used outside of cpu_pm */ static void sysc_reinit_modules(struct sysc_soc_info *soc) { struct sysc_module *module; - struct list_head *pos; struct sysc *ddata; - list_for_each(pos, &sysc_soc->restored_modules) { - module = list_entry(pos, struct sysc_module, node); + list_for_each_entry(module, &sysc_soc->restored_modules, node) { ddata = module->ddata; sysc_reinit_module(ddata, ddata->enabled); } @@ -2592,25 +2490,6 @@ out_unlock: mutex_unlock(&sysc_soc->list_lock); } -/** - * sysc_legacy_idle_quirk - handle children in omap_device compatible way - * @ddata: device driver data - * @child: child device driver - * - * Allow idle for child devices as done with _od_runtime_suspend(). - * Otherwise many child devices will not idle because of the permanent - * parent usecount set in pm_runtime_irq_safe(). - * - * Note that the long term solution is to just modify the child device - * drivers to not set pm_runtime_irq_safe() and then this can be just - * dropped. - */ -static void sysc_legacy_idle_quirk(struct sysc *ddata, struct device *child) -{ - if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE) - dev_pm_domain_set(child, &sysc_child_pm_domain); -} - static int sysc_notifier_call(struct notifier_block *nb, unsigned long event, void *device) { @@ -2627,7 +2506,6 @@ static int sysc_notifier_call(struct notifier_block *nb, error = sysc_child_add_clocks(ddata, dev); if (error) return error; - sysc_legacy_idle_quirk(ddata, dev); break; default: break; @@ -2658,14 +2536,12 @@ static const struct sysc_dts_quirk sysc_dts_quirks[] = { static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np, bool is_child) { - const struct property *prop; - int i, len; + int i; for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) { const char *name = sysc_dts_quirks[i].name; - prop = of_get_property(np, name, &len); - if (!prop) + if (!of_property_present(np, name)) continue; ddata->cfg.quirks |= sysc_dts_quirks[i].mask; @@ -2851,8 +2727,7 @@ static const struct sysc_capabilities sysc_34xx_sr = { .type = TI_SYSC_OMAP34XX_SR, .sysc_mask = SYSC_OMAP2_CLOCKACTIVITY, .regbits = &sysc_regbits_omap34xx_sr, - .mod_quirks = SYSC_QUIRK_USE_CLOCKACT | SYSC_QUIRK_UNCACHED | - SYSC_QUIRK_LEGACY_IDLE, + .mod_quirks = SYSC_QUIRK_USE_CLOCKACT | SYSC_QUIRK_UNCACHED, }; /* @@ -2873,13 +2748,12 @@ static const struct sysc_capabilities sysc_36xx_sr = { .type = TI_SYSC_OMAP36XX_SR, .sysc_mask = SYSC_OMAP3_SR_ENAWAKEUP, .regbits = &sysc_regbits_omap36xx_sr, - .mod_quirks = SYSC_QUIRK_UNCACHED | SYSC_QUIRK_LEGACY_IDLE, + .mod_quirks = SYSC_QUIRK_UNCACHED, }; static const struct sysc_capabilities sysc_omap4_sr = { .type = TI_SYSC_OMAP4_SR, .regbits = &sysc_regbits_omap36xx_sr, - .mod_quirks = SYSC_QUIRK_LEGACY_IDLE, }; /* @@ -3039,6 +2913,8 @@ static void ti_sysc_idle(struct work_struct *work) static 
const struct soc_device_attribute sysc_soc_match[] = { SOC_FLAG("OMAP242*", SOC_2420), SOC_FLAG("OMAP243*", SOC_2430), + SOC_FLAG("AM33*", SOC_AM33), + SOC_FLAG("AM35*", SOC_AM35), SOC_FLAG("OMAP3[45]*", SOC_3430), SOC_FLAG("OMAP3[67]*", SOC_3630), SOC_FLAG("OMAP443*", SOC_4430), @@ -3123,7 +2999,7 @@ static int sysc_init_static_data(struct sysc *ddata) match = soc_device_match(sysc_soc_match); if (match && match->data) - sysc_soc->soc = (int)match->data; + sysc_soc->soc = (enum sysc_soc)(uintptr_t)match->data; /* * Check and warn about possible old incomplete dtb. We now want to see @@ -3208,12 +3084,10 @@ static void sysc_cleanup_static_data(void) static int sysc_check_disabled_devices(struct sysc *ddata) { struct sysc_address *disabled_module; - struct list_head *pos; int error = 0; mutex_lock(&sysc_soc->list_lock); - list_for_each(pos, &sysc_soc->disabled_modules) { - disabled_module = list_entry(pos, struct sysc_address, node); + list_for_each_entry(disabled_module, &sysc_soc->disabled_modules, node) { if (ddata->module_pa == disabled_module->base) { dev_dbg(ddata->dev, "module disabled for this SoC\n"); error = -ENODEV; @@ -3245,10 +3119,15 @@ static int sysc_check_active_timer(struct sysc *ddata) * can be dropped if we stop supporting old beagleboard revisions * A to B4 at some point. */ - if (sysc_soc->soc == SOC_3430) + switch (sysc_soc->soc) { + case SOC_AM33: + case SOC_3430: + case SOC_AM35: error = -ENXIO; - else + break; + default: error = -EBUSY; + } if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) && (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)) @@ -3390,7 +3269,7 @@ unprepare: return error; } -static int sysc_remove(struct platform_device *pdev) +static void sysc_remove(struct platform_device *pdev) { struct sysc *ddata = platform_get_drvdata(pdev); int error; @@ -3415,8 +3294,6 @@ static int sysc_remove(struct platform_device *pdev) unprepare: sysc_unprepare(ddata); - - return 0; } static const struct of_device_id sysc_match[] = { diff --git a/drivers/bus/ts-nbus.c b/drivers/bus/ts-nbus.c index 38c886dc2ed6..2328c48b9b12 100644 --- a/drivers/bus/ts-nbus.c +++ b/drivers/bus/ts-nbus.c @@ -39,45 +39,39 @@ struct ts_nbus { /* * request all gpios required by the bus. 
*/ -static int ts_nbus_init_pdata(struct platform_device *pdev, struct ts_nbus - *ts_nbus) +static int ts_nbus_init_pdata(struct platform_device *pdev, + struct ts_nbus *ts_nbus) { ts_nbus->data = devm_gpiod_get_array(&pdev->dev, "ts,data", GPIOD_OUT_HIGH); - if (IS_ERR(ts_nbus->data)) { - dev_err(&pdev->dev, "failed to retrieve ts,data-gpio from dts\n"); - return PTR_ERR(ts_nbus->data); - } + if (IS_ERR(ts_nbus->data)) + return dev_err_probe(&pdev->dev, PTR_ERR(ts_nbus->data), + "failed to retrieve ts,data-gpio from dts\n"); ts_nbus->csn = devm_gpiod_get(&pdev->dev, "ts,csn", GPIOD_OUT_HIGH); - if (IS_ERR(ts_nbus->csn)) { - dev_err(&pdev->dev, "failed to retrieve ts,csn-gpio from dts\n"); - return PTR_ERR(ts_nbus->csn); - } + if (IS_ERR(ts_nbus->csn)) + return dev_err_probe(&pdev->dev, PTR_ERR(ts_nbus->csn), + "failed to retrieve ts,csn-gpio from dts\n"); ts_nbus->txrx = devm_gpiod_get(&pdev->dev, "ts,txrx", GPIOD_OUT_HIGH); - if (IS_ERR(ts_nbus->txrx)) { - dev_err(&pdev->dev, "failed to retrieve ts,txrx-gpio from dts\n"); - return PTR_ERR(ts_nbus->txrx); - } + if (IS_ERR(ts_nbus->txrx)) + return dev_err_probe(&pdev->dev, PTR_ERR(ts_nbus->txrx), + "failed to retrieve ts,txrx-gpio from dts\n"); ts_nbus->strobe = devm_gpiod_get(&pdev->dev, "ts,strobe", GPIOD_OUT_HIGH); - if (IS_ERR(ts_nbus->strobe)) { - dev_err(&pdev->dev, "failed to retrieve ts,strobe-gpio from dts\n"); - return PTR_ERR(ts_nbus->strobe); - } + if (IS_ERR(ts_nbus->strobe)) + return dev_err_probe(&pdev->dev, PTR_ERR(ts_nbus->strobe), + "failed to retrieve ts,strobe-gpio from dts\n"); ts_nbus->ale = devm_gpiod_get(&pdev->dev, "ts,ale", GPIOD_OUT_HIGH); - if (IS_ERR(ts_nbus->ale)) { - dev_err(&pdev->dev, "failed to retrieve ts,ale-gpio from dts\n"); - return PTR_ERR(ts_nbus->ale); - } + if (IS_ERR(ts_nbus->ale)) + return dev_err_probe(&pdev->dev, PTR_ERR(ts_nbus->ale), + "failed to retrieve ts,ale-gpio from dts\n"); ts_nbus->rdy = devm_gpiod_get(&pdev->dev, "ts,rdy", GPIOD_IN); - if (IS_ERR(ts_nbus->rdy)) { - dev_err(&pdev->dev, "failed to retrieve ts,rdy-gpio from dts\n"); - return PTR_ERR(ts_nbus->rdy); - } + if (IS_ERR(ts_nbus->rdy)) + return dev_err_probe(&pdev->dev, PTR_ERR(ts_nbus->rdy), + "failed to retrieve ts,rdy-gpio from dts\n"); return 0; } @@ -273,7 +267,7 @@ EXPORT_SYMBOL_GPL(ts_nbus_write); static int ts_nbus_probe(struct platform_device *pdev) { struct pwm_device *pwm; - struct pwm_args pargs; + struct pwm_state state; struct device *dev = &pdev->dev; struct ts_nbus *ts_nbus; int ret; @@ -289,32 +283,24 @@ static int ts_nbus_probe(struct platform_device *pdev) return ret; pwm = devm_pwm_get(dev, NULL); - if (IS_ERR(pwm)) { - ret = PTR_ERR(pwm); - if (ret != -EPROBE_DEFER) - dev_err(dev, "unable to request PWM\n"); - return ret; - } + if (IS_ERR(pwm)) + return dev_err_probe(dev, PTR_ERR(pwm), + "unable to request PWM\n"); - pwm_get_args(pwm, &pargs); - if (!pargs.period) { - dev_err(&pdev->dev, "invalid PWM period\n"); - return -EINVAL; - } + pwm_init_state(pwm, &state); + if (!state.period) + return dev_err_probe(dev, -EINVAL, "invalid PWM period\n"); - /* - * FIXME: pwm_apply_args() should be removed when switching to - * the atomic PWM API. - */ - pwm_apply_args(pwm); - ret = pwm_config(pwm, pargs.period, pargs.period); + state.duty_cycle = state.period; + state.enabled = true; + + ret = pwm_apply_might_sleep(pwm, &state); if (ret < 0) - return ret; + return dev_err_probe(dev, ret, "failed to configure PWM\n"); /* * we can now start the FPGA and populate the peripherals. 
*/ - pwm_enable(pwm); ts_nbus->pwm = pwm; /* @@ -324,14 +310,15 @@ static int ts_nbus_probe(struct platform_device *pdev) ret = of_platform_populate(dev->of_node, NULL, NULL, dev); if (ret < 0) - return ret; + return dev_err_probe(dev, ret, + "failed to populate platform devices on bus\n"); dev_info(dev, "initialized\n"); return 0; } -static int ts_nbus_remove(struct platform_device *pdev) +static void ts_nbus_remove(struct platform_device *pdev) { struct ts_nbus *ts_nbus = dev_get_drvdata(&pdev->dev); @@ -339,8 +326,6 @@ static int ts_nbus_remove(struct platform_device *pdev) mutex_lock(&ts_nbus->lock); pwm_disable(ts_nbus->pwm); mutex_unlock(&ts_nbus->lock); - - return 0; } static const struct of_device_id ts_nbus_of_match[] = { diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c index f70dedace20b..cb5c89ce7b86 100644 --- a/drivers/bus/uniphier-system-bus.c +++ b/drivers/bus/uniphier-system-bus.c @@ -176,10 +176,9 @@ static int uniphier_system_bus_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct uniphier_system_bus_priv *priv; - const __be32 *ranges; - u32 cells, addr, size; - u64 paddr; - int pna, bank, rlen, rone, ret; + struct of_range_parser parser; + struct of_range range; + int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -191,48 +190,17 @@ static int uniphier_system_bus_probe(struct platform_device *pdev) priv->dev = dev; - pna = of_n_addr_cells(dev->of_node); - - ret = of_property_read_u32(dev->of_node, "#address-cells", &cells); - if (ret) { - dev_err(dev, "failed to get #address-cells\n"); - return ret; - } - if (cells != 2) { - dev_err(dev, "#address-cells must be 2\n"); - return -EINVAL; - } - - ret = of_property_read_u32(dev->of_node, "#size-cells", &cells); - if (ret) { - dev_err(dev, "failed to get #size-cells\n"); + ret = of_range_parser_init(&parser, dev->of_node); + if (ret) return ret; - } - if (cells != 1) { - dev_err(dev, "#size-cells must be 1\n"); - return -EINVAL; - } - ranges = of_get_property(dev->of_node, "ranges", &rlen); - if (!ranges) { - dev_err(dev, "failed to get ranges property\n"); - return -ENOENT; - } - - rlen /= sizeof(*ranges); - rone = pna + 2; - - for (; rlen >= rone; rlen -= rone) { - bank = be32_to_cpup(ranges++); - addr = be32_to_cpup(ranges++); - paddr = of_translate_address(dev->of_node, ranges); - if (paddr == OF_BAD_ADDR) + for_each_of_range(&parser, &range) { + if (range.cpu_addr == OF_BAD_ADDR) return -EINVAL; - ranges += pna; - size = be32_to_cpup(ranges++); - - ret = uniphier_system_bus_add_bank(priv, bank, addr, - paddr, size); + ret = uniphier_system_bus_add_bank(priv, + upper_32_bits(range.bus_addr), + lower_32_bits(range.bus_addr), + range.cpu_addr, range.size); if (ret) return ret; } diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c index a58ac0c8e282..64ee920721ee 100644 --- a/drivers/bus/vexpress-config.c +++ b/drivers/bus/vexpress-config.c @@ -10,7 +10,7 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> -#include <linux/of_device.h> +#include <linux/of_platform.h> #include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/vexpress.h> @@ -54,7 +54,7 @@ struct vexpress_syscfg_func { struct vexpress_syscfg *syscfg; struct regmap *regmap; int num_templates; - u32 template[]; /* Keep it last! */ + u32 template[] __counted_by(num_templates); /* Keep it last! 
*/ }; struct vexpress_config_bridge_ops { @@ -350,7 +350,6 @@ static struct vexpress_config_bridge_ops vexpress_syscfg_bridge_ops = { static int vexpress_syscfg_probe(struct platform_device *pdev) { struct vexpress_syscfg *syscfg; - struct resource *res; struct vexpress_config_bridge *bridge; struct device_node *node; int master; @@ -362,8 +361,7 @@ static int vexpress_syscfg_probe(struct platform_device *pdev) syscfg->dev = &pdev->dev; INIT_LIST_HEAD(&syscfg->funcs); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - syscfg->base = devm_ioremap_resource(&pdev->dev, res); + syscfg->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(syscfg->base)) return PTR_ERR(syscfg->base); @@ -416,4 +414,5 @@ static struct platform_driver vexpress_syscfg_driver = { .probe = vexpress_syscfg_probe, }; module_platform_driver(vexpress_syscfg_driver); +MODULE_DESCRIPTION("Versatile Express configuration bus"); MODULE_LICENSE("GPL v2");
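A note on the "ranges" parsing conversions above: both ti-sysc and uniphier-system-bus drop open-coded walks of the raw ranges property in favor of the of_range_parser helpers, which handle #address-cells/#size-cells and address translation internally. A minimal sketch of the pattern, not taken from the diff (the foo_* name is illustrative):

#include <linux/device.h>
#include <linux/of_address.h>

static int foo_parse_ranges(struct device *dev)
{
	struct of_range_parser parser;
	struct of_range range;
	int error;

	error = of_range_parser_init(&parser, dev->of_node);
	if (error)
		return error;

	/* Each iteration yields one translated "ranges" entry */
	for_each_of_range(&parser, &range) {
		if (range.cpu_addr == OF_BAD_ADDR)
			return -EINVAL;

		dev_dbg(dev, "bus %#llx -> cpu %#llx, size %#llx\n",
			(unsigned long long)range.bus_addr,
			(unsigned long long)range.cpu_addr,
			(unsigned long long)range.size);
	}

	return 0;
}

This is the shape both drivers now use: ti-sysc keeps only the first range, while uniphier-system-bus registers one bank per range.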
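Likewise, the sunxi-rsb change makes transfers usable from atomic context (e.g. a PMIC write late in a poweroff path, with interrupts already disabled) by busy-polling the interrupt status register instead of sleeping on the completion that the IRQ handler normally signals. A stripped-down sketch of that dual path: the foo_* names and register layout are hypothetical, only readl_poll_timeout_atomic() and the completion API are real kernel interfaces.

#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>

#define FOO_STAT	0x10		/* hypothetical status register */
#define FOO_STAT_DONE	BIT(0)
#define FOO_STAT_ERR	BIT(1)

struct foo_bus {
	void __iomem *regs;
	struct completion complete;	/* signalled by the IRQ handler */
	u32 status;			/* status captured by the IRQ handler */
};

static int foo_wait_xfer(struct foo_bus *bus)
{
	u32 status;

	if (irqs_disabled()) {
		/* Atomic context: poll every 10 us, give up after 100 ms */
		if (readl_poll_timeout_atomic(bus->regs + FOO_STAT, status,
					      status & (FOO_STAT_DONE | FOO_STAT_ERR),
					      10, 100000))
			return -ETIMEDOUT;
		writel(status, bus->regs + FOO_STAT);	/* ack the events */
	} else {
		/* Normal context: sleep until the IRQ handler completes us */
		if (!wait_for_completion_io_timeout(&bus->complete,
						    msecs_to_jiffies(100)))
			return -ETIMEDOUT;
		status = bus->status;
	}

	return (status & FOO_STAT_ERR) ? -EIO : 0;
}

The interrupt-driven path is unchanged from the original driver design; only the polling branch is new.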
