Diffstat (limited to 'drivers/bus')
28 files changed, 571 insertions, 373 deletions
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index fcfa280df98a..e6742998f372 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -31,7 +31,7 @@ config ARM_INTEGRATOR_LM config BRCMSTB_GISB_ARB tristate "Broadcom STB GISB bus arbiter" - depends on ARM || ARM64 || MIPS + depends on ARCH_BRCMSTB || BMIPS_GENERIC default ARCH_BRCMSTB || BMIPS_GENERIC help Driver for the Broadcom Set Top Box System-on-a-chip internal bus @@ -210,7 +210,8 @@ config TI_PWMSS config TI_SYSC bool "TI sysc interconnect target module driver" - depends on ARCH_OMAP2PLUS + depends on ARCH_OMAP2PLUS || ARCH_K3 + default y help Generic driver for Texas Instruments interconnect target module found on many TI SoCs. diff --git a/drivers/bus/fsl-mc/dprc.c b/drivers/bus/fsl-mc/dprc.c index d129338b8bc0..dd1b5c0fb7e2 100644 --- a/drivers/bus/fsl-mc/dprc.c +++ b/drivers/bus/fsl-mc/dprc.c @@ -450,10 +450,8 @@ int dprc_get_obj(struct fsl_mc_io *mc_io, obj_desc->ver_major = le16_to_cpu(rsp_params->version_major); obj_desc->ver_minor = le16_to_cpu(rsp_params->version_minor); obj_desc->flags = le16_to_cpu(rsp_params->flags); - strncpy(obj_desc->type, rsp_params->type, 16); - obj_desc->type[15] = '\0'; - strncpy(obj_desc->label, rsp_params->label, 16); - obj_desc->label[15] = '\0'; + strscpy_pad(obj_desc->type, rsp_params->type, 16); + strscpy_pad(obj_desc->label, rsp_params->label, 16); return 0; } EXPORT_SYMBOL_GPL(dprc_get_obj); @@ -491,8 +489,7 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io, cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr); cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num); cmd_params->obj_id = cpu_to_le32(obj_id); - strncpy(cmd_params->obj_type, obj_type, 16); - cmd_params->obj_type[15] = '\0'; + strscpy_pad(cmd_params->obj_type, obj_type, 16); /* send command to mc*/ return mc_send_command(mc_io, &cmd); @@ -564,8 +561,7 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io, cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params; cmd_params->obj_id = cpu_to_le32(obj_id); cmd_params->region_index = region_index; - strncpy(cmd_params->obj_type, obj_type, 16); - cmd_params->obj_type[15] = '\0'; + strscpy_pad(cmd_params->obj_type, obj_type, 16); /* send command to mc*/ err = mc_send_command(mc_io, &cmd); diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c index 4352745a923c..78b96cd63de9 100644 --- a/drivers/bus/fsl-mc/fsl-mc-bus.c +++ b/drivers/bus/fsl-mc/fsl-mc-bus.c @@ -14,6 +14,7 @@ #include <linux/of_device.h> #include <linux/of_address.h> #include <linux/ioport.h> +#include <linux/platform_device.h> #include <linux/slab.h> #include <linux/limits.h> #include <linux/bitops.h> @@ -994,75 +995,18 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev, } EXPORT_SYMBOL_GPL(fsl_mc_get_endpoint); -static int parse_mc_ranges(struct device *dev, - int *paddr_cells, - int *mc_addr_cells, - int *mc_size_cells, - const __be32 **ranges_start) -{ - const __be32 *prop; - int range_tuple_cell_count; - int ranges_len; - int tuple_len; - struct device_node *mc_node = dev->of_node; - - *ranges_start = of_get_property(mc_node, "ranges", &ranges_len); - if (!(*ranges_start) || !ranges_len) { - dev_warn(dev, - "missing or empty ranges property for device tree node '%pOFn'\n", - mc_node); - return 0; - } - - *paddr_cells = of_n_addr_cells(mc_node); - - prop = of_get_property(mc_node, "#address-cells", NULL); - if (prop) - *mc_addr_cells = be32_to_cpup(prop); - else - *mc_addr_cells = *paddr_cells; - - prop = of_get_property(mc_node, "#size-cells", NULL); - if 
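The strncpy() + manual NUL-termination pairs dropped from dprc.c above become single strscpy_pad() calls. A minimal sketch of the idiom, assuming a 16-byte fixed-size field like the dprc command/response structs (demo_copy_obj_type is a made-up helper):

#include <linux/string.h>

/* Illustrative only: strscpy_pad() bounds the copy, always NUL-terminates,
 * and zero-fills the rest of the buffer, which matters when the field is
 * passed verbatim to the MC firmware.
 */
static void demo_copy_obj_type(char dst[16], const char *src)
{
	strscpy_pad(dst, src, 16);	/* was: strncpy(dst, src, 16); dst[15] = '\0'; */
}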
(prop) - *mc_size_cells = be32_to_cpup(prop); - else - *mc_size_cells = of_n_size_cells(mc_node); - - range_tuple_cell_count = *paddr_cells + *mc_addr_cells + - *mc_size_cells; - - tuple_len = range_tuple_cell_count * sizeof(__be32); - if (ranges_len % tuple_len != 0) { - dev_err(dev, "malformed ranges property '%pOFn'\n", mc_node); - return -EINVAL; - } - - return ranges_len / tuple_len; -} - static int get_mc_addr_translation_ranges(struct device *dev, struct fsl_mc_addr_translation_range **ranges, u8 *num_ranges) { - int ret; - int paddr_cells; - int mc_addr_cells; - int mc_size_cells; - int i; - const __be32 *ranges_start; - const __be32 *cell; - - ret = parse_mc_ranges(dev, - &paddr_cells, - &mc_addr_cells, - &mc_size_cells, - &ranges_start); - if (ret < 0) - return ret; + struct fsl_mc_addr_translation_range *r; + struct of_range_parser parser; + struct of_range range; - *num_ranges = ret; - if (!ret) { + of_range_parser_init(&parser, dev->of_node); + *num_ranges = of_range_count(&parser); + if (!*num_ranges) { /* * Missing or empty ranges property ("ranges;") for the * 'fsl,qoriq-mc' node. In this case, identity mapping @@ -1078,20 +1022,13 @@ static int get_mc_addr_translation_ranges(struct device *dev, if (!(*ranges)) return -ENOMEM; - cell = ranges_start; - for (i = 0; i < *num_ranges; ++i) { - struct fsl_mc_addr_translation_range *range = &(*ranges)[i]; - - range->mc_region_type = of_read_number(cell, 1); - range->start_mc_offset = of_read_number(cell + 1, - mc_addr_cells - 1); - cell += mc_addr_cells; - range->start_phys_addr = of_read_number(cell, paddr_cells); - cell += paddr_cells; - range->end_mc_offset = range->start_mc_offset + - of_read_number(cell, mc_size_cells); - - cell += mc_size_cells; + r = *ranges; + for_each_of_range(&parser, &range) { + r->mc_region_type = range.flags; + r->start_mc_offset = range.bus_addr; + r->end_mc_offset = range.bus_addr + range.size; + r->start_phys_addr = range.cpu_addr; + r++; } return 0; @@ -1230,14 +1167,11 @@ error_cleanup_mc_io: * fsl_mc_bus_remove - callback invoked when the root MC bus is being * removed */ -static int fsl_mc_bus_remove(struct platform_device *pdev) +static void fsl_mc_bus_remove(struct platform_device *pdev) { struct fsl_mc *mc = platform_get_drvdata(pdev); struct fsl_mc_io *mc_io; - if (!fsl_mc_is_root_dprc(&mc->root_mc_bus_dev->dev)) - return -EINVAL; - mc_io = mc->root_mc_bus_dev->mc_io; fsl_mc_device_remove(mc->root_mc_bus_dev); fsl_destroy_mc_io(mc_io); @@ -1253,13 +1187,6 @@ static int fsl_mc_bus_remove(struct platform_device *pdev) (GCR1_P1_STOP | GCR1_P2_STOP), mc->fsl_mc_regs + FSL_MC_GCR1); } - - return 0; -} - -static void fsl_mc_bus_shutdown(struct platform_device *pdev) -{ - fsl_mc_bus_remove(pdev); } static const struct of_device_id fsl_mc_bus_match_table[] = { @@ -1283,8 +1210,8 @@ static struct platform_driver fsl_mc_bus_driver = { .acpi_match_table = fsl_mc_bus_acpi_match_table, }, .probe = fsl_mc_bus_probe, - .remove = fsl_mc_bus_remove, - .shutdown = fsl_mc_bus_shutdown, + .remove_new = fsl_mc_bus_remove, + .shutdown = fsl_mc_bus_remove, }; static int fsl_mc_bus_notifier(struct notifier_block *nb, diff --git a/drivers/bus/fsl-mc/fsl-mc-msi.c b/drivers/bus/fsl-mc/fsl-mc-msi.c index f3f8af9426c9..82cd69f7884c 100644 --- a/drivers/bus/fsl-mc/fsl-mc-msi.c +++ b/drivers/bus/fsl-mc/fsl-mc-msi.c @@ -7,8 +7,6 @@ * */ -#include <linux/of_device.h> -#include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/irq.h> #include <linux/irqdomain.h> diff --git a/drivers/bus/hisi_lpc.c 
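The hand-rolled "ranges" parsing removed from fsl-mc-bus.c gives way to the generic OF range parser used just above. A minimal sketch of that API, assuming np is the 'fsl,qoriq-mc' node (demo_count_ranges is a made-up helper):

#include <linux/of.h>
#include <linux/of_address.h>

static int demo_count_ranges(struct device_node *np)
{
	struct of_range_parser parser;
	struct of_range range;
	int n = 0;

	if (of_range_parser_init(&parser, np))
		return 0;	/* missing/empty ranges: caller falls back to identity mapping */

	/* each iteration decodes one (bus_addr, cpu_addr, size, flags) tuple */
	for_each_of_range(&parser, &range)
		n++;

	return n;
}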
b/drivers/bus/hisi_lpc.c index 5b65a48f17e7..09340adbacc2 100644 --- a/drivers/bus/hisi_lpc.c +++ b/drivers/bus/hisi_lpc.c @@ -13,9 +13,9 @@ #include <linux/logic_pio.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/pci.h> +#include <linux/platform_device.h> #include <linux/serial_8250.h> #include <linux/slab.h> @@ -657,7 +657,7 @@ static int hisi_lpc_probe(struct platform_device *pdev) return ret; } -static int hisi_lpc_remove(struct platform_device *pdev) +static void hisi_lpc_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct hisi_lpc_dev *lpcdev = dev_get_drvdata(dev); @@ -669,8 +669,6 @@ static int hisi_lpc_remove(struct platform_device *pdev) of_platform_depopulate(dev); logic_pio_unregister_range(range); - - return 0; } static const struct of_device_id hisi_lpc_of_match[] = { @@ -691,6 +689,6 @@ static struct platform_driver hisi_lpc_driver = { .acpi_match_table = hisi_lpc_acpi_match, }, .probe = hisi_lpc_probe, - .remove = hisi_lpc_remove, + .remove_new = hisi_lpc_remove, }; builtin_platform_driver(hisi_lpc_driver); diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c index 52a5d0447390..837bf9d51c6e 100644 --- a/drivers/bus/imx-weim.c +++ b/drivers/bus/imx-weim.c @@ -11,7 +11,10 @@ #include <linux/clk.h> #include <linux/io.h> #include <linux/of_address.h> -#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/property.h> #include <linux/mfd/syscon.h> #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> #include <linux/regmap.h> @@ -117,7 +120,7 @@ static int imx_weim_gpr_setup(struct platform_device *pdev) i++; } - if (i == 0 || i % 4) + if (i == 0) goto err; for (i = 0; i < ARRAY_SIZE(gprvals); i++) { @@ -202,9 +205,7 @@ static int weim_timing_setup(struct device *dev, struct device_node *np, static int weim_parse_dt(struct platform_device *pdev) { - const struct of_device_id *of_id = of_match_device(weim_id_table, - &pdev->dev); - const struct imx_weim_devtype *devtype = of_id->data; + const struct imx_weim_devtype *devtype = device_get_match_data(&pdev->dev); int ret = 0, have_child = 0; struct device_node *child; struct weim_priv *priv; @@ -273,7 +274,7 @@ static int weim_probe(struct platform_device *pdev) return -ENOMEM; /* get the resource */ - base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); + base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h index a2125fa5fe2f..577965f95fda 100644 --- a/drivers/bus/mhi/ep/internal.h +++ b/drivers/bus/mhi/ep/internal.h @@ -126,6 +126,7 @@ struct mhi_ep_ring { union mhi_ep_ring_ctx *ring_ctx; struct mhi_ring_element *ring_cache; enum mhi_ep_ring_type type; + struct delayed_work intmodt_work; u64 rbase; size_t rd_offset; size_t wr_offset; @@ -135,7 +136,9 @@ struct mhi_ep_ring { u32 ch_id; u32 er_index; u32 irq_vector; + u32 intmodt; bool started; + bool irq_pending; }; struct mhi_ep_cmd { @@ -159,6 +162,7 @@ struct mhi_ep_chan { void (*xfer_cb)(struct mhi_ep_device *mhi_dev, struct mhi_result *result); enum mhi_ch_state state; enum dma_data_direction dir; + size_t rd_offset; u64 tre_loc; u32 tre_size; u32 tre_bytes_left; diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 600881808982..65fc1d738bec 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -54,11 +54,27 @@ static int 
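weim_parse_dt() above now fetches its devtype with device_get_match_data() instead of of_match_device(). A minimal sketch of the idiom under made-up demo_* names:

#include <linux/platform_device.h>
#include <linux/property.h>

struct demo_devtype {
	unsigned int cs_count;		/* whatever per-compatible data the driver keeps */
};

static int demo_probe(struct platform_device *pdev)
{
	/* returns the .data pointer of the matched of_device_id/ACPI entry, or NULL */
	const struct demo_devtype *devtype = device_get_match_data(&pdev->dev);

	if (!devtype)
		return -ENODEV;

	return 0;
}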
mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx, mutex_unlock(&mhi_cntrl->event_lock); /* - * Raise IRQ to host only if the BEI flag is not set in TRE. Host might - * set this flag for interrupt moderation as per MHI protocol. + * As per the MHI specification, section 4.3, Interrupt moderation: + * + * 1. If BEI flag is not set, cancel any pending intmodt work if started + * for the event ring and raise IRQ immediately. + * + * 2. If both BEI and intmodt are set, and if no IRQ is pending for the + * same event ring, start the IRQ delayed work as per the value of + * intmodt. If previous IRQ is pending, then do nothing as the pending + * IRQ is enough for the host to process the current event ring element. + * + * 3. If BEI is set and intmodt is not set, no need to raise IRQ. */ - if (!bei) + if (!bei) { + if (READ_ONCE(ring->irq_pending)) + cancel_delayed_work(&ring->intmodt_work); + mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector); + } else if (ring->intmodt && !READ_ONCE(ring->irq_pending)) { + WRITE_ONCE(ring->irq_pending, true); + schedule_delayed_work(&ring->intmodt_work, msecs_to_jiffies(ring->intmodt)); + } return 0; @@ -71,45 +87,77 @@ err_unlock: static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code) { - struct mhi_ring_element event = {}; + struct mhi_ring_element *event; + int ret; + + event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA); + if (!event) + return -ENOMEM; - event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre)); - event.dword[0] = MHI_TRE_EV_DWORD0(code, len); - event.dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT); + event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre)); + event->dword[0] = MHI_TRE_EV_DWORD0(code, len); + event->dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT); - return mhi_ep_send_event(mhi_cntrl, ring->er_index, &event, MHI_TRE_DATA_GET_BEI(tre)); + ret = mhi_ep_send_event(mhi_cntrl, ring->er_index, event, MHI_TRE_DATA_GET_BEI(tre)); + kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event); + + return ret; } int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state) { - struct mhi_ring_element event = {}; + struct mhi_ring_element *event; + int ret; - event.dword[0] = MHI_SC_EV_DWORD0(state); - event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT); + event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA); + if (!event) + return -ENOMEM; - return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); + event->dword[0] = MHI_SC_EV_DWORD0(state); + event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT); + + ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0); + kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event); + + return ret; } int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env) { - struct mhi_ring_element event = {}; + struct mhi_ring_element *event; + int ret; - event.dword[0] = MHI_EE_EV_DWORD0(exec_env); - event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT); + event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA); + if (!event) + return -ENOMEM; + + event->dword[0] = MHI_EE_EV_DWORD0(exec_env); + event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT); - return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); + ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0); + kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event); + + 
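The interrupt-moderation plumbing referenced in the comment above is split between internal.h and ring.c; a consolidated sketch of the pattern under made-up demo_* names (the real fields live in struct mhi_ep_ring):

#include <linux/types.h>
#include <linux/workqueue.h>

struct demo_ring {
	struct delayed_work intmodt_work;	/* fires one moderated IRQ */
	bool irq_pending;			/* an IRQ is already scheduled */
	u32 intmodt;				/* moderation time in ms, from the event ctx */
	u32 irq_vector;
};

static void demo_raise_irq(struct work_struct *work)
{
	struct demo_ring *ring = container_of(work, struct demo_ring,
					      intmodt_work.work);

	/* raise the MSI for ring->irq_vector here, then allow a new one to be queued */
	WRITE_ONCE(ring->irq_pending, false);
}

/* at event ring start: INIT_DELAYED_WORK(&ring->intmodt_work, demo_raise_irq); */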
return ret; } static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code) { struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring; - struct mhi_ring_element event = {}; + struct mhi_ring_element *event; + int ret; + + event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA); + if (!event) + return -ENOMEM; + + event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element)); + event->dword[0] = MHI_CC_EV_DWORD0(code); + event->dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT); - event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element)); - event.dword[0] = MHI_CC_EV_DWORD0(code); - event.dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT); + ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0); + kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event); - return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); + return ret; } static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el) @@ -151,6 +199,8 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele goto err_unlock; } + + mhi_chan->rd_offset = ch_ring->rd_offset; } /* Set channel state to RUNNING */ @@ -280,22 +330,85 @@ bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_directio struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; - return !!(ring->rd_offset == ring->wr_offset); + return !!(mhi_chan->rd_offset == ring->wr_offset); } EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty); +static void mhi_ep_read_completion(struct mhi_ep_buf_info *buf_info) +{ + struct mhi_ep_device *mhi_dev = buf_info->mhi_dev; + struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ep_chan *mhi_chan = mhi_dev->ul_chan; + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; + struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset]; + struct mhi_result result = {}; + int ret; + + if (mhi_chan->xfer_cb) { + result.buf_addr = buf_info->cb_buf; + result.dir = mhi_chan->dir; + result.bytes_xferd = buf_info->size; + + mhi_chan->xfer_cb(mhi_dev, &result); + } + + /* + * The host will split the data packet into multiple TREs if it can't fit + * the packet in a single TRE. In that case, CHAIN flag will be set by the + * host for all TREs except the last one. + */ + if (buf_info->code != MHI_EV_CC_OVERFLOW) { + if (MHI_TRE_DATA_GET_CHAIN(el)) { + /* + * IEOB (Interrupt on End of Block) flag will be set by the host if + * it expects the completion event for all TREs of a TD. + */ + if (MHI_TRE_DATA_GET_IEOB(el)) { + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, + MHI_TRE_DATA_GET_LEN(el), + MHI_EV_CC_EOB); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, + "Error sending transfer compl. event\n"); + goto err_free_tre_buf; + } + } + } else { + /* + * IEOT (Interrupt on End of Transfer) flag will be set by the host + * for the last TRE of the TD and expects the completion event for + * the same. + */ + if (MHI_TRE_DATA_GET_IEOT(el)) { + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, + MHI_TRE_DATA_GET_LEN(el), + MHI_EV_CC_EOT); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, + "Error sending transfer compl. 
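Event ring elements above are no longer built on the stack but allocated from a dedicated DMA-capable slab cache. A minimal sketch of that lifecycle under made-up demo_* names (demo_el mirrors the driver-internal struct mhi_ring_element):

#include <linux/slab.h>
#include <linux/types.h>

struct demo_el {
	__le64 ptr;
	__le32 dword[2];
};

static struct kmem_cache *demo_ev_cache;

static int demo_cache_init(void)
{
	demo_ev_cache = kmem_cache_create("demo_ev_el", sizeof(struct demo_el),
					  0, SLAB_CACHE_DMA, NULL);
	return demo_ev_cache ? 0 : -ENOMEM;
}

static int demo_send_one(void)
{
	struct demo_el *el;

	el = kmem_cache_zalloc(demo_ev_cache, GFP_KERNEL | GFP_DMA);
	if (!el)
		return -ENOMEM;

	/* fill ptr/dword, hand the element to the event ring, then release it */
	kmem_cache_free(demo_ev_cache, el);
	return 0;
}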
event\n"); + goto err_free_tre_buf; + } + } + } + } + + mhi_ep_ring_inc_index(ring); + +err_free_tre_buf: + kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_info->cb_buf); +} + static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, - struct mhi_ep_ring *ring, - struct mhi_result *result, - u32 len) + struct mhi_ep_ring *ring) { struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; struct device *dev = &mhi_cntrl->mhi_dev->dev; size_t tr_len, read_offset, write_offset; + struct mhi_ep_buf_info buf_info = {}; + u32 len = MHI_EP_DEFAULT_MTU; struct mhi_ring_element *el; bool tr_done = false; - void *write_addr; - u64 read_addr; + void *buf_addr; u32 buf_left; int ret; @@ -308,7 +421,7 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, return -ENODEV; } - el = &ring->ring_cache[ring->rd_offset]; + el = &ring->ring_cache[mhi_chan->rd_offset]; /* Check if there is data pending to be read from previous read operation */ if (mhi_chan->tre_bytes_left) { @@ -324,81 +437,51 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left; write_offset = len - buf_left; - read_addr = mhi_chan->tre_loc + read_offset; - write_addr = result->buf_addr + write_offset; + + buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA); + if (!buf_addr) + return -ENOMEM; + + buf_info.host_addr = mhi_chan->tre_loc + read_offset; + buf_info.dev_addr = buf_addr + write_offset; + buf_info.size = tr_len; + buf_info.cb = mhi_ep_read_completion; + buf_info.cb_buf = buf_addr; + buf_info.mhi_dev = mhi_chan->mhi_dev; + + if (mhi_chan->tre_bytes_left - tr_len) + buf_info.code = MHI_EV_CC_OVERFLOW; dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id); - ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len); + ret = mhi_cntrl->read_async(mhi_cntrl, &buf_info); if (ret < 0) { dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n"); - return ret; + goto err_free_buf_addr; } buf_left -= tr_len; mhi_chan->tre_bytes_left -= tr_len; - /* - * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been - * read completely: - * - * 1. Send completion event to the host based on the flags set in TRE. - * 2. Increment the local read offset of the transfer ring. - */ if (!mhi_chan->tre_bytes_left) { - /* - * The host will split the data packet into multiple TREs if it can't fit - * the packet in a single TRE. In that case, CHAIN flag will be set by the - * host for all TREs except the last one. - */ - if (MHI_TRE_DATA_GET_CHAIN(el)) { - /* - * IEOB (Interrupt on End of Block) flag will be set by the host if - * it expects the completion event for all TREs of a TD. - */ - if (MHI_TRE_DATA_GET_IEOB(el)) { - ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, - MHI_TRE_DATA_GET_LEN(el), - MHI_EV_CC_EOB); - if (ret < 0) { - dev_err(&mhi_chan->mhi_dev->dev, - "Error sending transfer compl. event\n"); - return ret; - } - } - } else { - /* - * IEOT (Interrupt on End of Transfer) flag will be set by the host - * for the last TRE of the TD and expects the completion event for - * the same. - */ - if (MHI_TRE_DATA_GET_IEOT(el)) { - ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, - MHI_TRE_DATA_GET_LEN(el), - MHI_EV_CC_EOT); - if (ret < 0) { - dev_err(&mhi_chan->mhi_dev->dev, - "Error sending transfer compl. 
event\n"); - return ret; - } - } - + if (MHI_TRE_DATA_GET_IEOT(el)) tr_done = true; - } - mhi_ep_ring_inc_index(ring); + mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size; } - - result->bytes_xferd += tr_len; } while (buf_left && !tr_done); return 0; + +err_free_buf_addr: + kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_addr); + + return ret; } -static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el) +static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring) { struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; struct mhi_result result = {}; - u32 len = MHI_EP_DEFAULT_MTU; struct mhi_ep_chan *mhi_chan; int ret; @@ -419,44 +502,59 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_elem mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); } else { /* UL channel */ - result.buf_addr = kzalloc(len, GFP_KERNEL); - if (!result.buf_addr) - return -ENOMEM; - do { - ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len); + ret = mhi_ep_read_channel(mhi_cntrl, ring); if (ret < 0) { dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n"); - kfree(result.buf_addr); return ret; } - result.dir = mhi_chan->dir; - mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); - result.bytes_xferd = 0; - memset(result.buf_addr, 0, len); - /* Read until the ring becomes empty */ } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE)); - - kfree(result.buf_addr); } return 0; } +static void mhi_ep_skb_completion(struct mhi_ep_buf_info *buf_info) +{ + struct mhi_ep_device *mhi_dev = buf_info->mhi_dev; + struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan; + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; + struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset]; + struct device *dev = &mhi_dev->dev; + struct mhi_result result = {}; + int ret; + + if (mhi_chan->xfer_cb) { + result.buf_addr = buf_info->cb_buf; + result.dir = mhi_chan->dir; + result.bytes_xferd = buf_info->size; + + mhi_chan->xfer_cb(mhi_dev, &result); + } + + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, buf_info->size, + buf_info->code); + if (ret) { + dev_err(dev, "Error sending transfer completion event\n"); + return; + } + + mhi_ep_ring_inc_index(ring); +} + /* TODO: Handle partially formed TDs */ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb) { struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan; struct device *dev = &mhi_chan->mhi_dev->dev; + struct mhi_ep_buf_info buf_info = {}; struct mhi_ring_element *el; u32 buf_left, read_offset; struct mhi_ep_ring *ring; - enum mhi_ev_ccs code; - void *read_addr; - u64 write_addr; size_t tr_len; u32 tre_len; int ret; @@ -480,40 +578,44 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb) goto err_exit; } - el = &ring->ring_cache[ring->rd_offset]; + el = &ring->ring_cache[mhi_chan->rd_offset]; tre_len = MHI_TRE_DATA_GET_LEN(el); tr_len = min(buf_left, tre_len); read_offset = skb->len - buf_left; - read_addr = skb->data + read_offset; - write_addr = MHI_TRE_DATA_GET_PTR(el); - dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id); - ret = mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len); - if (ret < 0) { - dev_err(dev, "Error writing to the channel\n"); - goto err_exit; - } + buf_info.dev_addr = skb->data + read_offset; + buf_info.host_addr = MHI_TRE_DATA_GET_PTR(el); + buf_info.size = tr_len; + buf_info.cb = 
mhi_ep_skb_completion; + buf_info.cb_buf = skb; + buf_info.mhi_dev = mhi_dev; - buf_left -= tr_len; /* * For all TREs queued by the host for DL channel, only the EOT flag will be set. * If the packet doesn't fit into a single TRE, send the OVERFLOW event to * the host so that the host can adjust the packet boundary to next TREs. Else send * the EOT event to the host indicating the packet boundary. */ - if (buf_left) - code = MHI_EV_CC_OVERFLOW; + if (buf_left - tr_len) + buf_info.code = MHI_EV_CC_OVERFLOW; else - code = MHI_EV_CC_EOT; + buf_info.code = MHI_EV_CC_EOT; - ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code); - if (ret) { - dev_err(dev, "Error sending transfer completion event\n"); + dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id); + ret = mhi_cntrl->write_async(mhi_cntrl, &buf_info); + if (ret < 0) { + dev_err(dev, "Error writing to the channel\n"); goto err_exit; } - mhi_ep_ring_inc_index(ring); + buf_left -= tr_len; + + /* + * Update the read offset cached in mhi_chan. Actual read offset + * will be updated by the completion handler. + */ + mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size; } while (buf_left); mutex_unlock(&mhi_chan->lock); @@ -714,7 +816,6 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work) struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work); struct device *dev = &mhi_cntrl->mhi_dev->dev; struct mhi_ep_ring_item *itr, *tmp; - struct mhi_ring_element *el; struct mhi_ep_ring *ring; struct mhi_ep_chan *chan; unsigned long flags; @@ -748,31 +849,29 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work) if (ret) { dev_err(dev, "Error updating write offset for ring\n"); mutex_unlock(&chan->lock); - kfree(itr); + kmem_cache_free(mhi_cntrl->ring_item_cache, itr); continue; } /* Sanity check to make sure there are elements in the ring */ - if (ring->rd_offset == ring->wr_offset) { + if (chan->rd_offset == ring->wr_offset) { mutex_unlock(&chan->lock); - kfree(itr); + kmem_cache_free(mhi_cntrl->ring_item_cache, itr); continue; } - el = &ring->ring_cache[ring->rd_offset]; - dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id); - ret = mhi_ep_process_ch_ring(ring, el); + ret = mhi_ep_process_ch_ring(ring); if (ret) { dev_err(dev, "Error processing ring for channel (%u): %d\n", ring->ch_id, ret); mutex_unlock(&chan->lock); - kfree(itr); + kmem_cache_free(mhi_cntrl->ring_item_cache, itr); continue; } mutex_unlock(&chan->lock); - kfree(itr); + kmem_cache_free(mhi_cntrl->ring_item_cache, itr); } } @@ -828,7 +927,7 @@ static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned lon u32 ch_id = ch_idx + i; ring = &mhi_cntrl->mhi_chan[ch_id].ring; - item = kzalloc(sizeof(*item), GFP_ATOMIC); + item = kmem_cache_zalloc(mhi_cntrl->ring_item_cache, GFP_ATOMIC); if (!item) return; @@ -1365,6 +1464,10 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq) return -EINVAL; + if (!mhi_cntrl->read_sync || !mhi_cntrl->write_sync || + !mhi_cntrl->read_async || !mhi_cntrl->write_async) + return -EINVAL; + ret = mhi_ep_chan_init(mhi_cntrl, config); if (ret) return ret; @@ -1375,6 +1478,29 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, goto err_free_ch; } + mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el", + sizeof(struct mhi_ring_element), 0, + SLAB_CACHE_DMA, NULL); + if (!mhi_cntrl->ev_ring_el_cache) { + ret = 
-ENOMEM; + goto err_free_cmd; + } + + mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0, + SLAB_CACHE_DMA, NULL); + if (!mhi_cntrl->tre_buf_cache) { + ret = -ENOMEM; + goto err_destroy_ev_ring_el_cache; + } + + mhi_cntrl->ring_item_cache = kmem_cache_create("mhi_ep_ring_item", + sizeof(struct mhi_ep_ring_item), 0, + 0, NULL); + if (!mhi_cntrl->ev_ring_el_cache) { + ret = -ENOMEM; + goto err_destroy_tre_buf_cache; + } + INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker); INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker); INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker); @@ -1383,7 +1509,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0); if (!mhi_cntrl->wq) { ret = -ENOMEM; - goto err_free_cmd; + goto err_destroy_ring_item_cache; } INIT_LIST_HEAD(&mhi_cntrl->st_transition_list); @@ -1442,6 +1568,12 @@ err_ida_free: ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); err_destroy_wq: destroy_workqueue(mhi_cntrl->wq); +err_destroy_ring_item_cache: + kmem_cache_destroy(mhi_cntrl->ring_item_cache); +err_destroy_ev_ring_el_cache: + kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache); +err_destroy_tre_buf_cache: + kmem_cache_destroy(mhi_cntrl->tre_buf_cache); err_free_cmd: kfree(mhi_cntrl->mhi_cmd); err_free_ch: @@ -1463,6 +1595,9 @@ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl) free_irq(mhi_cntrl->irq, mhi_cntrl); + kmem_cache_destroy(mhi_cntrl->tre_buf_cache); + kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache); + kmem_cache_destroy(mhi_cntrl->ring_item_cache); kfree(mhi_cntrl->mhi_cmd); kfree(mhi_cntrl->mhi_chan); diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c index 115518ec76a4..aeb53b2c34a8 100644 --- a/drivers/bus/mhi/ep/ring.c +++ b/drivers/bus/mhi/ep/ring.c @@ -30,7 +30,8 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end) { struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; struct device *dev = &mhi_cntrl->mhi_dev->dev; - size_t start, copy_size; + struct mhi_ep_buf_info buf_info = {}; + size_t start; int ret; /* Don't proceed in the case of event ring. This happens during mhi_ep_ring_start(). 
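mhi_ep_register_controller() now rejects controllers that do not provide all four transfer hooks. A sketch of what a controller driver (for example the PCI EPF glue) is expected to wire up; the signatures follow the mhi_ep_buf_info usage in this series and the demo_* bodies are placeholders:

#include <linux/mhi_ep.h>

static int demo_read_sync(struct mhi_ep_cntrl *mhi_cntrl,
			  struct mhi_ep_buf_info *buf_info)
{
	/* blocking copy of buf_info->size bytes from host_addr to dev_addr */
	return 0;
}

static int demo_read_async(struct mhi_ep_cntrl *mhi_cntrl,
			   struct mhi_ep_buf_info *buf_info)
{
	/* queue the transfer; invoke buf_info->cb(buf_info) on completion */
	return 0;
}

/* likewise demo_write_sync()/demo_write_async(); then, before registration:
 *   mhi_cntrl->read_sync  = demo_read_sync;   mhi_cntrl->write_sync  = demo_write_sync;
 *   mhi_cntrl->read_async = demo_read_async;  mhi_cntrl->write_async = demo_write_async;
 */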
*/ @@ -43,30 +44,34 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end) start = ring->wr_offset; if (start < end) { - copy_size = (end - start) * sizeof(struct mhi_ring_element); - ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase + - (start * sizeof(struct mhi_ring_element)), - &ring->ring_cache[start], copy_size); + buf_info.size = (end - start) * sizeof(struct mhi_ring_element); + buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element)); + buf_info.dev_addr = &ring->ring_cache[start]; + + ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info); if (ret < 0) return ret; } else { - copy_size = (ring->ring_size - start) * sizeof(struct mhi_ring_element); - ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase + - (start * sizeof(struct mhi_ring_element)), - &ring->ring_cache[start], copy_size); + buf_info.size = (ring->ring_size - start) * sizeof(struct mhi_ring_element); + buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element)); + buf_info.dev_addr = &ring->ring_cache[start]; + + ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info); if (ret < 0) return ret; if (end) { - ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase, - &ring->ring_cache[0], - end * sizeof(struct mhi_ring_element)); + buf_info.host_addr = ring->rbase; + buf_info.dev_addr = &ring->ring_cache[0]; + buf_info.size = end * sizeof(struct mhi_ring_element); + + ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info); if (ret < 0) return ret; } } - dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, copy_size); + dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, buf_info.size); return 0; } @@ -102,6 +107,7 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e { struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ep_buf_info buf_info = {}; size_t old_offset = 0; u32 num_free_elem; __le64 rp; @@ -133,12 +139,11 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase); memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64)); - ret = mhi_cntrl->write_to_host(mhi_cntrl, el, ring->rbase + (old_offset * sizeof(*el)), - sizeof(*el)); - if (ret < 0) - return ret; + buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el)); + buf_info.dev_addr = el; + buf_info.size = sizeof(*el); - return 0; + return mhi_cntrl->write_sync(mhi_cntrl, &buf_info); } void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id) @@ -157,6 +162,15 @@ void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 } } +static void mhi_ep_raise_irq(struct work_struct *work) +{ + struct mhi_ep_ring *ring = container_of(work, struct mhi_ep_ring, intmodt_work.work); + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + + mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector); + WRITE_ONCE(ring->irq_pending, false); +} + int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, union mhi_ep_ring_ctx *ctx) { @@ -173,8 +187,13 @@ int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, if (ring->type == RING_TYPE_CH) ring->er_index = le32_to_cpu(ring->ring_ctx->ch.erindex); - if (ring->type == RING_TYPE_ER) + if (ring->type == RING_TYPE_ER) { ring->irq_vector = le32_to_cpu(ring->ring_ctx->ev.msivec); + ring->intmodt = FIELD_GET(EV_CTX_INTMODT_MASK, + le32_to_cpu(ring->ring_ctx->ev.intmod)); + + 
INIT_DELAYED_WORK(&ring->intmodt_work, mhi_ep_raise_irq); + } /* During ring init, both rp and wp are equal */ memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rp, sizeof(u64)); @@ -201,6 +220,9 @@ int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring) { + if (ring->type == RING_TYPE_ER) + cancel_delayed_work_sync(&ring->intmodt_work); + ring->started = false; kfree(ring->ring_cache); ring->ring_cache = NULL; diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c index d2a19b07ccb8..edc0ec5a0933 100644 --- a/drivers/bus/mhi/host/boot.c +++ b/drivers/bus/mhi/host/boot.c @@ -365,12 +365,10 @@ error_alloc_mhi_buf: } static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl, - const struct firmware *firmware, + const u8 *buf, size_t remainder, struct image_info *img_info) { - size_t remainder = firmware->size; size_t to_cpy; - const u8 *buf = firmware->data; struct mhi_buf *mhi_buf = img_info->mhi_buf; struct bhi_vec_entry *bhi_vec = img_info->bhi_vec; @@ -393,9 +391,10 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) struct device *dev = &mhi_cntrl->mhi_dev->dev; enum mhi_pm_state new_state; const char *fw_name; + const u8 *fw_data; void *buf; dma_addr_t dma_addr; - size_t size; + size_t size, fw_sz; int i, ret; if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { @@ -425,6 +424,20 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ? mhi_cntrl->edl_image : mhi_cntrl->fw_image; + /* check if the driver has already provided the firmware data */ + if (!fw_name && mhi_cntrl->fbc_download && + mhi_cntrl->fw_data && mhi_cntrl->fw_sz) { + if (!mhi_cntrl->sbl_size) { + dev_err(dev, "fw_data provided but no sbl_size\n"); + goto error_fw_load; + } + + size = mhi_cntrl->sbl_size; + fw_data = mhi_cntrl->fw_data; + fw_sz = mhi_cntrl->fw_sz; + goto skip_req_fw; + } + if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size || !mhi_cntrl->seg_len))) { dev_err(dev, @@ -444,6 +457,10 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) if (size > firmware->size) size = firmware->size; + fw_data = firmware->data; + fw_sz = firmware->size; + +skip_req_fw: buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, &dma_addr, GFP_KERNEL); if (!buf) { @@ -452,7 +469,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) } /* Download image using BHI */ - memcpy(buf, firmware->data, size); + memcpy(buf, fw_data, size); ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size); dma_free_coherent(mhi_cntrl->cntrl_dev, size, buf, dma_addr); @@ -464,7 +481,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) } /* Wait for ready since EDL image was loaded */ - if (fw_name == mhi_cntrl->edl_image) { + if (fw_name && fw_name == mhi_cntrl->edl_image) { release_firmware(firmware); goto fw_load_ready_state; } @@ -478,15 +495,14 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) * device transitioning into MHI READY state */ if (mhi_cntrl->fbc_download) { - ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, - firmware->size); + ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, fw_sz); if (ret) { release_firmware(firmware); goto error_fw_load; } /* Load the firmware into BHIE vec table */ - mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image); + mhi_firmware_copy(mhi_cntrl, fw_data, fw_sz, mhi_cntrl->fbc_image); } release_firmware(firmware); diff --git 
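The new fw_data/fw_sz handling above lets a controller hand over an image that is already in memory and skip request_firmware(). A sketch of the setup this path expects; field names follow the diff, while the buffer, its length and the SZ_512K sizes are illustrative:

#include <linux/mhi.h>
#include <linux/sizes.h>

static void demo_setup_preloaded_fw(struct mhi_controller *mhi_cntrl,
				    const u8 *image, size_t len)
{
	mhi_cntrl->fw_image = NULL;		/* nothing to request from the filesystem */
	mhi_cntrl->fw_data = image;		/* must stay valid for the whole load */
	mhi_cntrl->fw_sz = len;
	mhi_cntrl->fbc_download = true;		/* full image is pushed over BHIe */
	mhi_cntrl->sbl_size = SZ_512K;		/* required, or the load is rejected */
	mhi_cntrl->seg_len = SZ_512K;
}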
a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c index f72fcb66f408..65ceac1837f9 100644 --- a/drivers/bus/mhi/host/init.c +++ b/drivers/bus/mhi/host/init.c @@ -759,7 +759,7 @@ static int parse_ch_cfg(struct mhi_controller *mhi_cntrl, * so to avoid any memory possible allocation failures, vzalloc is * used here */ - mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan * + mhi_cntrl->mhi_chan = vcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan)); if (!mhi_cntrl->mhi_chan) return -ENOMEM; @@ -881,6 +881,7 @@ static int parse_config(struct mhi_controller *mhi_cntrl, if (!mhi_cntrl->timeout_ms) mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS; + mhi_cntrl->ready_timeout_ms = config->ready_timeout_ms; mhi_cntrl->bounce_buf = config->use_bounce_buf; mhi_cntrl->buffer_len = config->buf_len; if (!mhi_cntrl->buffer_len) diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h index 2e139e76de4c..30ac415a3000 100644 --- a/drivers/bus/mhi/host/internal.h +++ b/drivers/bus/mhi/host/internal.h @@ -321,7 +321,7 @@ int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, u32 *out); int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, u32 offset, u32 mask, - u32 val, u32 delayus); + u32 val, u32 delayus, u32 timeout_ms); void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, u32 offset, u32 val); int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl, diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c index 74a75439c713..abb561db9ae1 100644 --- a/drivers/bus/mhi/host/main.c +++ b/drivers/bus/mhi/host/main.c @@ -40,10 +40,11 @@ int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, u32 offset, - u32 mask, u32 val, u32 delayus) + u32 mask, u32 val, u32 delayus, + u32 timeout_ms) { int ret; - u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus; + u32 out, retry = (timeout_ms * 1000) / delayus; while (retry--) { ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out); @@ -268,7 +269,8 @@ static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl, static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr) { - return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len; + return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len && + !(addr & (sizeof(struct mhi_ring_element) - 1)); } int mhi_destroy_device(struct device *dev, void *data) @@ -642,6 +644,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl, mhi_del_ring_element(mhi_cntrl, tre_ring); local_rp = tre_ring->rp; + read_unlock_bh(&mhi_chan->lock); + /* notify client */ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); @@ -667,6 +671,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl, kfree(buf_info->cb_buf); } } + + read_lock_bh(&mhi_chan->lock); } break; } /* CC_EOT */ @@ -938,7 +944,6 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, if (!mhi_chan->configured) break; parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); - event_quota--; } break; default: @@ -1123,17 +1128,15 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info, if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) return -EIO; - read_lock_irqsave(&mhi_cntrl->pm_lock, flags); - ret = mhi_is_ring_full(mhi_cntrl, tre_ring); - if (unlikely(ret)) { - ret = -EAGAIN; - goto exit_unlock; - } + if (unlikely(ret)) + return -EAGAIN; ret = 
mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags); if (unlikely(ret)) - goto exit_unlock; + return ret; + + read_lock_irqsave(&mhi_cntrl->pm_lock, flags); /* Packet is queued, take a usage ref to exit M3 if necessary * for host->device buffer, balanced put is done on buffer completion @@ -1153,7 +1156,6 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info, if (dir == DMA_FROM_DEVICE) mhi_cntrl->runtime_put(mhi_cntrl); -exit_unlock: read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); return ret; @@ -1205,6 +1207,9 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, int eot, eob, chain, bei; int ret; + /* Protect accesses for reading and incrementing WP */ + write_lock_bh(&mhi_chan->lock); + buf_ring = &mhi_chan->buf_ring; tre_ring = &mhi_chan->tre_ring; @@ -1222,8 +1227,10 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, if (!info->pre_mapped) { ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); - if (ret) + if (ret) { + write_unlock_bh(&mhi_chan->lock); return ret; + } } eob = !!(flags & MHI_EOB); @@ -1240,6 +1247,8 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, mhi_add_ring_element(mhi_cntrl, tre_ring); mhi_add_ring_element(mhi_cntrl, buf_ring); + write_unlock_bh(&mhi_chan->lock); + return 0; } diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c index db0a0b062d8e..cd6cd14b3d29 100644 --- a/drivers/bus/mhi/host/pci_generic.c +++ b/drivers/bus/mhi/host/pci_generic.c @@ -212,6 +212,19 @@ struct mhi_pci_dev_info { .offload_channel = false, \ } +#define MHI_EVENT_CONFIG_SW_DATA(ev_ring, el_count) \ + { \ + .num_elements = el_count, \ + .irq_moderation_ms = 0, \ + .irq = (ev_ring) + 1, \ + .priority = 1, \ + .mode = MHI_DB_BRST_DISABLE, \ + .data_type = MHI_ER_DATA, \ + .hardware_event = false, \ + .client_managed = false, \ + .offload_channel = false, \ + } + #define MHI_EVENT_CONFIG_HW_DATA(ev_ring, el_count, ch_num) \ { \ .num_elements = el_count, \ @@ -237,8 +250,10 @@ static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = { MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0), MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0), MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0), - MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2), - MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 3), + MHI_CHANNEL_CONFIG_UL(46, "IP_SW0", 64, 2), + MHI_CHANNEL_CONFIG_DL(47, "IP_SW0", 64, 3), + MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 4), + MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 5), }; static struct mhi_event_config modem_qcom_v1_mhi_events[] = { @@ -246,9 +261,22 @@ static struct mhi_event_config modem_qcom_v1_mhi_events[] = { MHI_EVENT_CONFIG_CTRL(0, 64), /* DIAG dedicated event ring */ MHI_EVENT_CONFIG_DATA(1, 128), + /* Software channels dedicated event ring */ + MHI_EVENT_CONFIG_SW_DATA(2, 64), + MHI_EVENT_CONFIG_SW_DATA(3, 64), /* Hardware channels request dedicated hardware event rings */ - MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), - MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101) + MHI_EVENT_CONFIG_HW_DATA(4, 1024, 100), + MHI_EVENT_CONFIG_HW_DATA(5, 2048, 101) +}; + +static const struct mhi_controller_config modem_qcom_v2_mhiv_config = { + .max_channels = 128, + .timeout_ms = 8000, + .ready_timeout_ms = 50000, + .num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels), + .ch_cfg = modem_qcom_v1_mhi_channels, + .num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events), + .event_cfg = modem_qcom_v1_mhi_events, }; static const struct mhi_controller_config 
modem_qcom_v1_mhiv_config = { @@ -260,6 +288,16 @@ static const struct mhi_controller_config modem_qcom_v1_mhiv_config = { .event_cfg = modem_qcom_v1_mhi_events, }; +static const struct mhi_pci_dev_info mhi_qcom_sdx75_info = { + .name = "qcom-sdx75m", + .fw = "qcom/sdx75m/xbl.elf", + .edl = "qcom/sdx75m/edl.mbn", + .config = &modem_qcom_v2_mhiv_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .sideband_wake = false, +}; + static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = { .name = "qcom-sdx65m", .fw = "qcom/sdx65m/xbl.elf", @@ -334,6 +372,16 @@ static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = { .sideband_wake = true, }; +static const struct mhi_pci_dev_info mhi_quectel_rm5xx_info = { + .name = "quectel-rm5xx", + .edl = "qcom/prog_firehose_sdx6x.elf", + .config = &modem_quectel_em1xx_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = true, +}; + static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = { MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0), MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0), @@ -567,12 +615,25 @@ static const struct pci_device_id mhi_pci_id_table[] = { /* Telit FN990 */ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010), .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info }, + /* Telit FE990 */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015), + .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info }, { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308), .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info }, + { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0309), + .driver_data = (kernel_ulong_t) &mhi_qcom_sdx75_info }, { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1002), /* EM160R-GL (sdx24) */ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, + /* RM520N-GL (sdx6x), eSIM */ + { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1004), + .driver_data = (kernel_ulong_t) &mhi_quectel_rm5xx_info }, + /* RM520N-GL (sdx6x), Lenovo variant */ + { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1007), + .driver_data = (kernel_ulong_t) &mhi_quectel_rm5xx_info }, + { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x100d), /* EM160R-GL (sdx24) */ + .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x2001), /* EM120R-GL for FCCL (sdx24) */ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, /* T99W175 (sdx55), Both for eSIM and Non-eSIM */ @@ -605,6 +666,12 @@ static const struct pci_device_id mhi_pci_id_table[] = { /* T99W510 (sdx24), variant 3 */ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f2), .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info }, + /* DW5932e-eSIM (sdx62), With eSIM */ + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f5), + .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info }, + /* DW5932e (sdx62), Non-eSIM */ + { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f9), + .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info }, /* MV31-W (Cinterion) */ { PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b3), .driver_data = (kernel_ulong_t) &mhi_mv31_info }, diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c index 083459028a4b..a2f2feef1476 100644 --- a/drivers/bus/mhi/host/pm.c +++ b/drivers/bus/mhi/host/pm.c @@ -163,6 +163,7 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl) enum mhi_pm_state cur_state; struct device *dev = &mhi_cntrl->mhi_dev->dev; u32 interval_us = 25000; /* poll register field 
every 25 milliseconds */ + u32 timeout_ms; int ret, i; /* Check if device entered error state */ @@ -173,14 +174,18 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl) /* Wait for RESET to be cleared and READY bit to be set by the device */ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, - MHICTRL_RESET_MASK, 0, interval_us); + MHICTRL_RESET_MASK, 0, interval_us, + mhi_cntrl->timeout_ms); if (ret) { dev_err(dev, "Device failed to clear MHI Reset\n"); return ret; } + timeout_ms = mhi_cntrl->ready_timeout_ms ? + mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms; ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, - MHISTATUS_READY_MASK, 1, interval_us); + MHISTATUS_READY_MASK, 1, interval_us, + timeout_ms); if (ret) { dev_err(dev, "Device failed to enter MHI Ready\n"); return ret; @@ -470,12 +475,16 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl) /* Trigger MHI RESET so that the device will not access host memory */ if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) { + /* Skip MHI RESET if in RDDM state */ + if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM) + goto skip_mhi_reset; + dev_dbg(dev, "Triggering MHI Reset in device\n"); mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); /* Wait for the reset bit to be cleared by the device */ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, - MHICTRL_RESET_MASK, 0, 25000); + MHICTRL_RESET_MASK, 0, 25000, mhi_cntrl->timeout_ms); if (ret) dev_err(dev, "Device failed to clear MHI Reset\n"); @@ -488,13 +497,14 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl) if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) { /* wait for ready to be set */ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, - MHISTATUS, - MHISTATUS_READY_MASK, 1, 25000); + MHISTATUS, MHISTATUS_READY_MASK, + 1, 25000, mhi_cntrl->timeout_ms); if (ret) dev_err(dev, "Device failed to enter READY state\n"); } } +skip_mhi_reset: dev_dbg(dev, "Waiting for all pending event ring processing to complete\n"); mhi_event = mhi_cntrl->mhi_event; @@ -1106,7 +1116,8 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl) if (state == MHI_STATE_SYS_ERR) { mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, - MHICTRL_RESET_MASK, 0, interval_us); + MHICTRL_RESET_MASK, 0, interval_us, + mhi_cntrl->timeout_ms); if (ret) { dev_info(dev, "Failed to reset MHI due to syserr state\n"); goto error_exit; @@ -1197,14 +1208,18 @@ EXPORT_SYMBOL_GPL(mhi_power_down); int mhi_sync_power_up(struct mhi_controller *mhi_cntrl) { int ret = mhi_async_power_up(mhi_cntrl); + u32 timeout_ms; if (ret) return ret; + /* Some devices need more time to set ready during power up */ + timeout_ms = mhi_cntrl->ready_timeout_ms ? + mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms; wait_event_timeout(mhi_cntrl->state_event, MHI_IN_MISSION_MODE(mhi_cntrl->ee) || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), - msecs_to_jiffies(mhi_cntrl->timeout_ms)); + msecs_to_jiffies(timeout_ms)); ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 
0 : -ETIMEDOUT; if (ret) diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c index 5eb0fe73ddc4..641c1a6adc8a 100644 --- a/drivers/bus/moxtet.c +++ b/drivers/bus/moxtet.c @@ -102,7 +102,7 @@ static int moxtet_match(struct device *dev, struct device_driver *drv) return 0; } -static struct bus_type moxtet_bus_type = { +static const struct bus_type moxtet_bus_type = { .name = "moxtet", .dev_groups = moxtet_dev_groups, .match = moxtet_match, @@ -755,7 +755,7 @@ static int moxtet_irq_setup(struct moxtet *moxtet) moxtet->irq.masked = ~0; ret = request_threaded_irq(moxtet->dev_irq, NULL, moxtet_irq_thread_fn, - IRQF_ONESHOT, "moxtet", moxtet); + IRQF_SHARED | IRQF_ONESHOT, "moxtet", moxtet); if (ret < 0) goto err_free; @@ -830,6 +830,12 @@ static void moxtet_remove(struct spi_device *spi) mutex_destroy(&moxtet->lock); } +static const struct spi_device_id moxtet_spi_ids[] = { + { "moxtet" }, + { }, +}; +MODULE_DEVICE_TABLE(spi, moxtet_spi_ids); + static const struct of_device_id moxtet_dt_ids[] = { { .compatible = "cznic,moxtet" }, {}, @@ -841,6 +847,7 @@ static struct spi_driver moxtet_spi_driver = { .name = "moxtet", .of_match_table = moxtet_dt_ids, }, + .id_table = moxtet_spi_ids, .probe = moxtet_probe, .remove = moxtet_remove, }; diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c index e02d0656242b..7d7479ba0a75 100644 --- a/drivers/bus/omap-ocp2scp.c +++ b/drivers/bus/omap-ocp2scp.c @@ -84,12 +84,10 @@ err0: return ret; } -static int omap_ocp2scp_remove(struct platform_device *pdev) +static void omap_ocp2scp_remove(struct platform_device *pdev) { pm_runtime_disable(&pdev->dev); device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices); - - return 0; } #ifdef CONFIG_OF @@ -103,7 +101,7 @@ MODULE_DEVICE_TABLE(of, omap_ocp2scp_id_table); static struct platform_driver omap_ocp2scp_driver = { .probe = omap_ocp2scp_probe, - .remove = omap_ocp2scp_remove, + .remove_new = omap_ocp2scp_remove, .driver = { .name = "omap-ocp2scp", .of_match_table = of_match_ptr(omap_ocp2scp_id_table), diff --git a/drivers/bus/omap_l3_smx.c b/drivers/bus/omap_l3_smx.c index bb1606f5ce2d..ee6d29925e4d 100644 --- a/drivers/bus/omap_l3_smx.c +++ b/drivers/bus/omap_l3_smx.c @@ -15,7 +15,6 @@ #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_device.h> #include "omap_l3_smx.h" @@ -166,19 +165,10 @@ static irqreturn_t omap3_l3_app_irq(int irq, void *_l3) irqreturn_t ret = IRQ_NONE; int_type = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR; - if (!int_type) { + if (!int_type) status = omap3_l3_readll(l3->rt, L3_SI_FLAG_STATUS_0); - /* - * if we have a timeout error, there's nothing we can - * do besides rebooting the board. So let's BUG on any - * of such errors and handle the others. timeout error - * is severe and not expected to occur. - */ - BUG_ON(status & L3_STATUS_0_TIMEOUT_MASK); - } else { + else status = omap3_l3_readll(l3->rt, L3_SI_FLAG_STATUS_1); - /* No timeout error for debug sources */ - } /* identify the error source */ err_source = __ffs(status); @@ -190,6 +180,14 @@ static irqreturn_t omap3_l3_app_irq(int irq, void *_l3) ret |= omap3_l3_block_irq(l3, error, error_addr); } + /* + * if we have a timeout error, there's nothing we can + * do besides rebooting the board. So let's BUG on any + * of such errors and handle the others. timeout error + * is severe and not expected to occur. 
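The spi_device_id table added to moxtet.c above exists for module autoloading: the SPI core emits MODALIAS=spi:<name> uevents, so an of_match_table alone does not pull the module in. A generic sketch of the pairing under made-up demo names:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/spi/spi.h>

static const struct of_device_id demo_of_ids[] = {
	{ .compatible = "vendor,demo-dev" },
	{ }
};
MODULE_DEVICE_TABLE(of, demo_of_ids);

static const struct spi_device_id demo_spi_ids[] = {
	{ "demo-dev" },			/* matches the spi:demo-dev modalias */
	{ }
};
MODULE_DEVICE_TABLE(spi, demo_spi_ids);

static int demo_probe(struct spi_device *spi)
{
	return 0;
}

static struct spi_driver demo_spi_driver = {
	.driver = {
		.name = "demo-dev",
		.of_match_table = demo_of_ids,	/* DT matching */
	},
	.id_table = demo_spi_ids,		/* modalias-based autoload */
	.probe = demo_probe,
};
module_spi_driver(demo_spi_driver);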
+ */ + BUG_ON(!int_type && status & L3_STATUS_0_TIMEOUT_MASK); + /* Clear the status register */ clear = (L3_AGENT_STATUS_CLEAR_IA << int_type) | L3_AGENT_STATUS_CLEAR_TA; @@ -263,7 +261,7 @@ err0: return ret; } -static int omap3_l3_remove(struct platform_device *pdev) +static void omap3_l3_remove(struct platform_device *pdev) { struct omap3_l3 *l3 = platform_get_drvdata(pdev); @@ -271,13 +269,11 @@ static int omap3_l3_remove(struct platform_device *pdev) free_irq(l3->debug_irq, l3); iounmap(l3->rt); kfree(l3); - - return 0; } static struct platform_driver omap3_l3_driver = { .probe = omap3_l3_probe, - .remove = omap3_l3_remove, + .remove_new = omap3_l3_remove, .driver = { .name = "omap_l3_smx", .of_match_table = of_match_ptr(omap3_l3_match), diff --git a/drivers/bus/qcom-ssc-block-bus.c b/drivers/bus/qcom-ssc-block-bus.c index 3fef18a43c01..5931974a21fa 100644 --- a/drivers/bus/qcom-ssc-block-bus.c +++ b/drivers/bus/qcom-ssc-block-bus.c @@ -350,7 +350,7 @@ static int qcom_ssc_block_bus_probe(struct platform_device *pdev) return 0; } -static int qcom_ssc_block_bus_remove(struct platform_device *pdev) +static void qcom_ssc_block_bus_remove(struct platform_device *pdev) { struct qcom_ssc_block_bus_data *data = platform_get_drvdata(pdev); @@ -363,8 +363,6 @@ static int qcom_ssc_block_bus_remove(struct platform_device *pdev) qcom_ssc_block_bus_pds_detach(&pdev->dev, data->pds, data->num_pds); pm_runtime_disable(&pdev->dev); pm_clk_destroy(&pdev->dev); - - return 0; } static const struct of_device_id qcom_ssc_block_bus_of_match[] = { @@ -375,7 +373,7 @@ MODULE_DEVICE_TABLE(of, qcom_ssc_block_bus_of_match); static struct platform_driver qcom_ssc_block_bus_driver = { .probe = qcom_ssc_block_bus_probe, - .remove = qcom_ssc_block_bus_remove, + .remove_new = qcom_ssc_block_bus_remove, .driver = { .name = "qcom-ssc-block-bus", .of_match_table = qcom_ssc_block_bus_of_match, diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c index 4da77ca7b75a..50870c827889 100644 --- a/drivers/bus/simple-pm-bus.c +++ b/drivers/bus/simple-pm-bus.c @@ -11,6 +11,8 @@ #include <linux/clk.h> #include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> @@ -72,17 +74,16 @@ static int simple_pm_bus_probe(struct platform_device *pdev) return 0; } -static int simple_pm_bus_remove(struct platform_device *pdev) +static void simple_pm_bus_remove(struct platform_device *pdev) { const void *data = of_device_get_match_data(&pdev->dev); if (pdev->driver_override || data) - return 0; + return; dev_dbg(&pdev->dev, "%s\n", __func__); pm_runtime_disable(&pdev->dev); - return 0; } static int simple_pm_bus_runtime_suspend(struct device *dev) @@ -127,7 +128,7 @@ MODULE_DEVICE_TABLE(of, simple_pm_bus_of_match); static struct platform_driver simple_pm_bus_driver = { .probe = simple_pm_bus_probe, - .remove = simple_pm_bus_remove, + .remove_new = simple_pm_bus_remove, .driver = { .name = "simple-pm-bus", .of_match_table = simple_pm_bus_of_match, diff --git a/drivers/bus/sun50i-de2.c b/drivers/bus/sun50i-de2.c index 414f29cdedf0..3339311ce068 100644 --- a/drivers/bus/sun50i-de2.c +++ b/drivers/bus/sun50i-de2.c @@ -24,10 +24,9 @@ static int sun50i_de2_bus_probe(struct platform_device *pdev) return 0; } -static int sun50i_de2_bus_remove(struct platform_device *pdev) +static void sun50i_de2_bus_remove(struct platform_device *pdev) { sunxi_sram_release(&pdev->dev); - return 0; } static const struct of_device_id 
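Most of the remaining conversions in this pull follow the same .remove() to .remove_new() pattern: the callback returns void because the int it used to return was only logged by the platform bus core. A generic sketch under made-up demo_* names:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int demo_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);
	return 0;
}

static void demo_remove(struct platform_device *pdev)
{
	/* void return: cleanup must not fail, and errors were ignored anyway */
	pm_runtime_disable(&pdev->dev);
}

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.remove_new = demo_remove,
	.driver = {
		.name = "demo",
	},
};
module_platform_driver(demo_driver);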
sun50i_de2_bus_of_match[] = { @@ -37,7 +36,7 @@ static const struct of_device_id sun50i_de2_bus_of_match[] = { static struct platform_driver sun50i_de2_bus_driver = { .probe = sun50i_de2_bus_probe, - .remove = sun50i_de2_bus_remove, + .remove_new = sun50i_de2_bus_remove, .driver = { .name = "sun50i-de2-bus", .of_match_table = sun50i_de2_bus_of_match, diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index 696c0aefb0ca..fd3e9d82340a 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c @@ -39,7 +39,7 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/of_irq.h> -#include <linux/of_platform.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/pm_runtime.h> @@ -746,7 +746,6 @@ static int sunxi_rsb_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; - struct resource *r; struct sunxi_rsb *rsb; u32 clk_freq = 3000000; int irq, ret; @@ -766,8 +765,7 @@ static int sunxi_rsb_probe(struct platform_device *pdev) rsb->dev = dev; rsb->clk_freq = clk_freq; platform_set_drvdata(pdev, rsb); - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - rsb->regs = devm_ioremap_resource(dev, r); + rsb->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(rsb->regs)) return PTR_ERR(rsb->regs); @@ -819,15 +817,13 @@ static int sunxi_rsb_probe(struct platform_device *pdev) return 0; } -static int sunxi_rsb_remove(struct platform_device *pdev) +static void sunxi_rsb_remove(struct platform_device *pdev) { struct sunxi_rsb *rsb = platform_get_drvdata(pdev); device_for_each_child(rsb->dev, NULL, sunxi_rsb_remove_devices); pm_runtime_disable(&pdev->dev); sunxi_rsb_hw_exit(rsb); - - return 0; } static const struct dev_pm_ops sunxi_rsb_dev_pm_ops = { @@ -844,7 +840,7 @@ MODULE_DEVICE_TABLE(of, sunxi_rsb_of_match_table); static struct platform_driver sunxi_rsb_driver = { .probe = sunxi_rsb_probe, - .remove = sunxi_rsb_remove, + .remove_new = sunxi_rsb_remove, .driver = { .name = RSB_CTRL_NAME, .of_match_table = sunxi_rsb_of_match_table, diff --git a/drivers/bus/tegra-aconnect.c b/drivers/bus/tegra-aconnect.c index ac58142301f4..de80008bff92 100644 --- a/drivers/bus/tegra-aconnect.c +++ b/drivers/bus/tegra-aconnect.c @@ -53,11 +53,9 @@ static int tegra_aconnect_probe(struct platform_device *pdev) return 0; } -static int tegra_aconnect_remove(struct platform_device *pdev) +static void tegra_aconnect_remove(struct platform_device *pdev) { pm_runtime_disable(&pdev->dev); - - return 0; } static int tegra_aconnect_runtime_resume(struct device *dev) @@ -106,7 +104,7 @@ MODULE_DEVICE_TABLE(of, tegra_aconnect_of_match); static struct platform_driver tegra_aconnect_driver = { .probe = tegra_aconnect_probe, - .remove = tegra_aconnect_remove, + .remove_new = tegra_aconnect_remove, .driver = { .name = "tegra-aconnect", .of_match_table = tegra_aconnect_of_match, diff --git a/drivers/bus/tegra-gmi.c b/drivers/bus/tegra-gmi.c index e3506ef37051..f5d6414df9f2 100644 --- a/drivers/bus/tegra-gmi.c +++ b/drivers/bus/tegra-gmi.c @@ -211,7 +211,6 @@ static int tegra_gmi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct tegra_gmi *gmi; - struct resource *res; int err; gmi = devm_kzalloc(dev, sizeof(*gmi), GFP_KERNEL); @@ -221,8 +220,7 @@ static int tegra_gmi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, gmi); gmi->dev = dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - gmi->base = devm_ioremap_resource(dev, res); + gmi->base = 
devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(gmi->base)) return PTR_ERR(gmi->base); @@ -260,14 +258,12 @@ static int tegra_gmi_probe(struct platform_device *pdev) return 0; } -static int tegra_gmi_remove(struct platform_device *pdev) +static void tegra_gmi_remove(struct platform_device *pdev) { struct tegra_gmi *gmi = platform_get_drvdata(pdev); of_platform_depopulate(gmi->dev); tegra_gmi_disable(gmi); - - return 0; } static int __maybe_unused tegra_gmi_runtime_resume(struct device *dev) @@ -307,7 +303,7 @@ MODULE_DEVICE_TABLE(of, tegra_gmi_id_table); static struct platform_driver tegra_gmi_driver = { .probe = tegra_gmi_probe, - .remove = tegra_gmi_remove, + .remove_new = tegra_gmi_remove, .driver = { .name = "tegra-gmi", .of_match_table = tegra_gmi_id_table, diff --git a/drivers/bus/ti-pwmss.c b/drivers/bus/ti-pwmss.c index e9c26c94251b..4969c556e752 100644 --- a/drivers/bus/ti-pwmss.c +++ b/drivers/bus/ti-pwmss.c @@ -10,7 +10,7 @@ #include <linux/io.h> #include <linux/err.h> #include <linux/pm_runtime.h> -#include <linux/of_device.h> +#include <linux/of_platform.h> static const struct of_device_id pwmss_of_match[] = { { .compatible = "ti,am33xx-pwmss" }, @@ -33,10 +33,9 @@ static int pwmss_probe(struct platform_device *pdev) return ret; } -static int pwmss_remove(struct platform_device *pdev) +static void pwmss_remove(struct platform_device *pdev) { pm_runtime_disable(&pdev->dev); - return 0; } static struct platform_driver pwmss_driver = { @@ -45,7 +44,7 @@ static struct platform_driver pwmss_driver = { .of_match_table = pwmss_of_match, }, .probe = pwmss_probe, - .remove = pwmss_remove, + .remove_new = pwmss_remove, }; module_platform_driver(pwmss_driver); diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 4cb23b9e06ea..245e5e827d0d 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -38,6 +38,7 @@ enum sysc_soc { SOC_2420, SOC_2430, SOC_3430, + SOC_AM35, SOC_3630, SOC_4430, SOC_4460, @@ -109,6 +110,7 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = { * @cookie: data used by legacy platform callbacks * @name: name if available * @revision: interconnect target module revision + * @sysconfig: saved sysconfig register value * @reserved: target module is reserved and already in use * @enabled: sysc runtime enabled status * @needs_resume: runtime resume needed on resume from suspend @@ -1096,6 +1098,11 @@ static int sysc_enable_module(struct device *dev) if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_SIDLE_ACT)) { best_mode = SYSC_IDLE_NO; + + /* Clear WAKEUP */ + if (regbits->enwkup_shift >= 0 && + ddata->cfg.sysc_val & BIT(regbits->enwkup_shift)) + reg &= ~BIT(regbits->enwkup_shift); } else { best_mode = fls(ddata->cfg.sidlemodes) - 1; if (best_mode > SYSC_IDLE_MASK) { @@ -1223,6 +1230,13 @@ set_sidle: } } + if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE_ACT) { + /* Set WAKEUP */ + if (regbits->enwkup_shift >= 0 && + ddata->cfg.sysc_val & BIT(regbits->enwkup_shift)) + reg |= BIT(regbits->enwkup_shift); + } + reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift); reg |= best_mode << regbits->sidle_shift; if (regbits->autoidle_shift >= 0 && @@ -1517,14 +1531,16 @@ struct sysc_revision_quirk { static const struct sysc_revision_quirk sysc_revision_quirks[] = { /* These drivers need to be fixed to not use pm_runtime_irq_safe() */ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff, - SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), + SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), SYSC_QUIRK("uart", 0, 0x50, 0x54, 
0x58, 0x00000052, 0xffffffff, - SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), + SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), /* Uarts on omap4 and later */ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff, - SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), + SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff, - SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE), + SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), + SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff, + SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), /* Quirks that need to be set based on the module address */ SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff, @@ -1859,7 +1875,7 @@ static void sysc_pre_reset_quirk_dss(struct sysc *ddata) dev_warn(ddata->dev, "%s: timed out %08x !+ %08x\n", __func__, val, irq_mask); - if (sysc_soc->soc == SOC_3430) { + if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35) { /* Clear DSS_SDI_CONTROL */ sysc_write(ddata, 0x44, 0); @@ -2142,14 +2158,23 @@ static int sysc_reset(struct sysc *ddata) sysc_val = sysc_read_sysconfig(ddata); sysc_val |= sysc_mask; sysc_write(ddata, sysc_offset, sysc_val); - /* Flush posted write */ + + /* + * Some devices need a delay before reading registers + * after reset. Presumably a srst_udelay is not needed + * for devices that use a rstctrl register reset. + */ + if (ddata->cfg.srst_udelay) + fsleep(ddata->cfg.srst_udelay); + + /* + * Flush posted write. For devices needing srst_udelay + * this should trigger an interconnect error if the + * srst_udelay value is needed but not configured. + */ sysc_val = sysc_read_sysconfig(ddata); } - if (ddata->cfg.srst_udelay) - usleep_range(ddata->cfg.srst_udelay, - ddata->cfg.srst_udelay * 2); - if (ddata->post_reset_quirk) ddata->post_reset_quirk(ddata); @@ -3022,6 +3047,7 @@ static void ti_sysc_idle(struct work_struct *work) static const struct soc_device_attribute sysc_soc_match[] = { SOC_FLAG("OMAP242*", SOC_2420), SOC_FLAG("OMAP243*", SOC_2430), + SOC_FLAG("AM35*", SOC_AM35), SOC_FLAG("OMAP3[45]*", SOC_3430), SOC_FLAG("OMAP3[67]*", SOC_3630), SOC_FLAG("OMAP443*", SOC_4430), @@ -3106,7 +3132,7 @@ static int sysc_init_static_data(struct sysc *ddata) match = soc_device_match(sysc_soc_match); if (match && match->data) - sysc_soc->soc = (int)match->data; + sysc_soc->soc = (enum sysc_soc)(uintptr_t)match->data; /* * Check and warn about possible old incomplete dtb. We now want to see @@ -3226,7 +3252,7 @@ static int sysc_check_active_timer(struct sysc *ddata) * can be dropped if we stop supporting old beagleboard revisions * A to B4 at some point. 
*/ - if (sysc_soc->soc == SOC_3430) + if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35) error = -ENXIO; else error = -EBUSY; @@ -3371,7 +3397,7 @@ unprepare: return error; } -static int sysc_remove(struct platform_device *pdev) +static void sysc_remove(struct platform_device *pdev) { struct sysc *ddata = platform_get_drvdata(pdev); int error; @@ -3396,8 +3422,6 @@ static int sysc_remove(struct platform_device *pdev) unprepare: sysc_unprepare(ddata); - - return 0; } static const struct of_device_id sysc_match[] = { @@ -3423,7 +3447,7 @@ MODULE_DEVICE_TABLE(of, sysc_match); static struct platform_driver sysc_driver = { .probe = sysc_probe, - .remove = sysc_remove, + .remove_new = sysc_remove, .driver = { .name = "ti-sysc", .of_match_table = sysc_match, diff --git a/drivers/bus/ts-nbus.c b/drivers/bus/ts-nbus.c index 38c886dc2ed6..4fa932cb0915 100644 --- a/drivers/bus/ts-nbus.c +++ b/drivers/bus/ts-nbus.c @@ -331,7 +331,7 @@ static int ts_nbus_probe(struct platform_device *pdev) return 0; } -static int ts_nbus_remove(struct platform_device *pdev) +static void ts_nbus_remove(struct platform_device *pdev) { struct ts_nbus *ts_nbus = dev_get_drvdata(&pdev->dev); @@ -339,8 +339,6 @@ static int ts_nbus_remove(struct platform_device *pdev) mutex_lock(&ts_nbus->lock); pwm_disable(ts_nbus->pwm); mutex_unlock(&ts_nbus->lock); - - return 0; } static const struct of_device_id ts_nbus_of_match[] = { @@ -351,7 +349,7 @@ MODULE_DEVICE_TABLE(of, ts_nbus_of_match); static struct platform_driver ts_nbus_driver = { .probe = ts_nbus_probe, - .remove = ts_nbus_remove, + .remove_new = ts_nbus_remove, .driver = { .name = "ts_nbus", .of_match_table = ts_nbus_of_match, diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c index 472a570bd53a..d2c7ada90186 100644 --- a/drivers/bus/vexpress-config.c +++ b/drivers/bus/vexpress-config.c @@ -54,7 +54,7 @@ struct vexpress_syscfg_func { struct vexpress_syscfg *syscfg; struct regmap *regmap; int num_templates; - u32 template[]; /* Keep it last! */ + u32 template[] __counted_by(num_templates); /* Keep it last! */ }; struct vexpress_config_bridge_ops { @@ -350,7 +350,6 @@ static struct vexpress_config_bridge_ops vexpress_syscfg_bridge_ops = { static int vexpress_syscfg_probe(struct platform_device *pdev) { struct vexpress_syscfg *syscfg; - struct resource *res; struct vexpress_config_bridge *bridge; struct device_node *node; int master; @@ -362,8 +361,7 @@ static int vexpress_syscfg_probe(struct platform_device *pdev) syscfg->dev = &pdev->dev; INIT_LIST_HEAD(&syscfg->funcs); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - syscfg->base = devm_ioremap_resource(&pdev->dev, res); + syscfg->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(syscfg->base)) return PTR_ERR(syscfg->base); |