Diffstat (limited to 'drivers/net/ipa')
-rw-r--r--  drivers/net/ipa/Kconfig           |   2
-rw-r--r--  drivers/net/ipa/gsi.c             | 311
-rw-r--r--  drivers/net/ipa/ipa.h             |   4
-rw-r--r--  drivers/net/ipa/ipa_clock.c       | 194
-rw-r--r--  drivers/net/ipa/ipa_cmd.c         |  45
-rw-r--r--  drivers/net/ipa/ipa_cmd.h         |  24
-rw-r--r--  drivers/net/ipa/ipa_data-sc7180.c |  38
-rw-r--r--  drivers/net/ipa/ipa_data-sdm845.c |  38
-rw-r--r--  drivers/net/ipa/ipa_data.h        |  26
-rw-r--r--  drivers/net/ipa/ipa_endpoint.c    |  89
-rw-r--r--  drivers/net/ipa/ipa_main.c        |  39
-rw-r--r--  drivers/net/ipa/ipa_mem.c         |   4
-rw-r--r--  drivers/net/ipa/ipa_modem.c       |   1
13 files changed, 461 insertions, 354 deletions
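A few of the mechanisms changed below are easier to follow outside the +/- noise of the hunks. First, gsi.c reworks how GSI immediate commands wait for completion: the timeout constant is now expressed in milliseconds and converted to jiffies at the call site, instead of multiplying a seconds value by HZ. This is the post-patch form of gsi_command(), assembled from the hunks that follow (the surrounding kernel context — struct gsi, the register layout, and the completion signaled from the GSI interrupt handler — is assumed):

#define GSI_CMD_TIMEOUT		50	/* milliseconds */

/* Write a GSI command register, then wait for the command's completion
 * to be signaled by the GSI interrupt handler.  Returns true if the
 * command completed, false if the wait timed out.
 */
static bool gsi_command(struct gsi *gsi, u32 reg, u32 val,
			struct completion *completion)
{
	unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);

	reinit_completion(completion);

	iowrite32(val, gsi->virt + reg);

	return !!wait_for_completion_timeout(completion, timeout);
}

Callers now read as "timeout = !gsi_command(...)", so each error path tests the failure condition directly rather than negating a success flag.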
diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig index 10a0e041ee77..b68f1289b89e 100644 --- a/drivers/net/ipa/Kconfig +++ b/drivers/net/ipa/Kconfig @@ -1,6 +1,6 @@ config QCOM_IPA tristate "Qualcomm IPA support" - depends on 64BIT && NET + depends on 64BIT && NET && QCOM_SMEM depends on ARCH_QCOM || COMPILE_TEST depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST) select QCOM_MDT_LOADER if ARCH_QCOM diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c index 14d9a791924b..53640447bf12 100644 --- a/drivers/net/ipa/gsi.c +++ b/drivers/net/ipa/gsi.c @@ -89,9 +89,9 @@ /* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */ #define GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */ -#define GSI_CMD_TIMEOUT 5 /* seconds */ +#define GSI_CMD_TIMEOUT 50 /* milliseconds */ -#define GSI_CHANNEL_STOP_RX_RETRIES 10 +#define GSI_CHANNEL_STOP_RETRIES 10 #define GSI_CHANNEL_MODEM_HALT_RETRIES 10 #define GSI_MHI_EVENT_ID_START 10 /* 1st reserved event id */ @@ -220,7 +220,59 @@ static void gsi_irq_teardown(struct gsi *gsi) /* Nothing to do */ } -static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id) +/* Event ring commands are performed one at a time. Their completion + * is signaled by the event ring control GSI interrupt type, which is + * only enabled when we issue an event ring command. Only the event + * ring being operated on has this interrupt enabled. + */ +static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id) +{ + u32 val = BIT(evt_ring_id); + + /* There's a small chance that a previous command completed + * after the interrupt was disabled, so make sure we have no + * pending interrupts before we enable them. + */ + iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET); + + iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET); + gsi_irq_type_enable(gsi, GSI_EV_CTRL); +} + +/* Disable event ring control interrupts */ +static void gsi_irq_ev_ctrl_disable(struct gsi *gsi) +{ + gsi_irq_type_disable(gsi, GSI_EV_CTRL); + iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET); +} + +/* Channel commands are performed one at a time. Their completion is + * signaled by the channel control GSI interrupt type, which is only + * enabled when we issue a channel command. Only the channel being + * operated on has this interrupt enabled. + */ +static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id) +{ + u32 val = BIT(channel_id); + + /* There's a small chance that a previous command completed + * after the interrupt was disabled, so make sure we have no + * pending interrupts before we enable them. 
+ */ + iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET); + + iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET); + gsi_irq_type_enable(gsi, GSI_CH_CTRL); +} + +/* Disable channel control interrupts */ +static void gsi_irq_ch_ctrl_disable(struct gsi *gsi) +{ + gsi_irq_type_disable(gsi, GSI_CH_CTRL); + iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET); +} + +static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id) { bool enable_ieob = !gsi->ieob_enabled_bitmap; u32 val; @@ -234,11 +286,11 @@ static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id) gsi_irq_type_enable(gsi, GSI_IEOB); } -static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id) +static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask) { u32 val; - gsi->ieob_enabled_bitmap &= ~BIT(evt_ring_id); + gsi->ieob_enabled_bitmap &= ~event_mask; /* Disable the interrupt type if this was the last enabled channel */ if (!gsi->ieob_enabled_bitmap) @@ -248,6 +300,11 @@ static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id) iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); } +static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id) +{ + gsi_irq_ieob_disable(gsi, BIT(evt_ring_id)); +} + /* Enable all GSI_interrupt types */ static void gsi_irq_enable(struct gsi *gsi) { @@ -307,11 +364,13 @@ static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset) static bool gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion) { + unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT); + reinit_completion(completion); iowrite32(val, gsi->virt + reg); - return !!wait_for_completion_timeout(completion, GSI_CMD_TIMEOUT * HZ); + return !!wait_for_completion_timeout(completion, timeout); } /* Return the hardware's notion of the current state of an event ring */ @@ -326,41 +385,26 @@ gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id) } /* Issue an event ring command and wait for it to complete */ -static void evt_ring_command(struct gsi *gsi, u32 evt_ring_id, - enum gsi_evt_cmd_opcode opcode) +static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id, + enum gsi_evt_cmd_opcode opcode) { struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; struct completion *completion = &evt_ring->completion; struct device *dev = gsi->dev; - bool success; + bool timeout; u32 val; - /* We only perform one event ring command at a time, and event - * control interrupts should only occur when such a command - * is issued here. Only permit *this* event ring to trigger - * an interrupt, and only enable the event control IRQ type - * when we expect it to occur. - * - * There's a small chance that a previous command completed - * after the interrupt was disabled, so make sure we have no - * pending interrupts before we enable them. 
- */ - iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET); - - val = BIT(evt_ring_id); - iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET); - gsi_irq_type_enable(gsi, GSI_EV_CTRL); + /* Enable the completion interrupt for the command */ + gsi_irq_ev_ctrl_enable(gsi, evt_ring_id); val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK); val |= u32_encode_bits(opcode, EV_OPCODE_FMASK); - success = gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion); + timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion); - /* Disable the interrupt again */ - gsi_irq_type_disable(gsi, GSI_EV_CTRL); - iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET); + gsi_irq_ev_ctrl_disable(gsi); - if (success) + if (!timeout) return; dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n", @@ -380,7 +424,7 @@ static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id) return -EINVAL; } - evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE); + gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE); /* If successful the event ring state will have changed */ if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED) @@ -405,7 +449,7 @@ static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id) return; } - evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET); + gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET); /* If successful the event ring state will have changed */ if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED) @@ -426,7 +470,7 @@ static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id) return; } - evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC); + gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC); /* If successful the event ring state will have changed */ if (evt_ring->state == GSI_EVT_RING_STATE_NOT_ALLOCATED) @@ -440,7 +484,7 @@ static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id) static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel) { u32 channel_id = gsi_channel_id(channel); - void *virt = channel->gsi->virt; + void __iomem *virt = channel->gsi->virt; u32 val; val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id)); @@ -456,34 +500,19 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode) u32 channel_id = gsi_channel_id(channel); struct gsi *gsi = channel->gsi; struct device *dev = gsi->dev; - bool success; + bool timeout; u32 val; - /* We only perform one channel command at a time, and channel - * control interrupts should only occur when such a command is - * issued here. So we only permit *this* channel to trigger - * an interrupt and only enable the channel control IRQ type - * when we expect it to occur. - * - * There's a small chance that a previous command completed - * after the interrupt was disabled, so make sure we have no - * pending interrupts before we enable them. 
- */ - iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET); - - val = BIT(channel_id); - iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET); - gsi_irq_type_enable(gsi, GSI_CH_CTRL); + /* Enable the completion interrupt for the command */ + gsi_irq_ch_ctrl_enable(gsi, channel_id); val = u32_encode_bits(channel_id, CH_CHID_FMASK); val |= u32_encode_bits(opcode, CH_OPCODE_FMASK); - success = gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion); + timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion); - /* Disable the interrupt again */ - gsi_irq_type_disable(gsi, GSI_CH_CTRL); - iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET); + gsi_irq_ch_ctrl_disable(gsi); - if (success) + if (!timeout) return; dev_err(dev, "GSI command %u for channel %u timed out, state %u\n", @@ -589,7 +618,8 @@ static void gsi_channel_reset_command(struct gsi_channel *channel) struct device *dev = channel->gsi->dev; enum gsi_channel_state state; - msleep(1); /* A short delay is required before a RESET command */ + /* A short delay is required before a RESET command */ + usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); state = gsi_channel_state(channel); if (state != GSI_CHANNEL_STATE_STOPPED && @@ -695,22 +725,38 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id) gsi_evt_ring_doorbell(gsi, evt_ring_id, 0); } -/* Return the last (most recent) transaction completed on a channel. */ +/* Find the transaction whose completion indicates a channel is quiesced */ static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel) { struct gsi_trans_info *trans_info = &channel->trans_info; + const struct list_head *list; struct gsi_trans *trans; spin_lock_bh(&trans_info->spinlock); - if (!list_empty(&trans_info->complete)) - trans = list_last_entry(&trans_info->complete, - struct gsi_trans, links); - else if (!list_empty(&trans_info->polled)) - trans = list_last_entry(&trans_info->polled, - struct gsi_trans, links); - else - trans = NULL; + /* There is a small chance a TX transaction got allocated just + * before we disabled transmits, so check for that. + */ + if (channel->toward_ipa) { + list = &trans_info->alloc; + if (!list_empty(list)) + goto done; + list = &trans_info->pending; + if (!list_empty(list)) + goto done; + } + + /* Otherwise (TX or RX) we want to wait for anything that + * has completed, or has been polled but not released yet. + */ + list = &trans_info->complete; + if (!list_empty(list)) + goto done; + list = &trans_info->polled; + if (list_empty(list)) + list = NULL; +done: + trans = list ? list_last_entry(list, struct gsi_trans, links) : NULL; /* Caller will wait for this, so take a reference */ if (trans) @@ -734,24 +780,6 @@ static void gsi_channel_trans_quiesce(struct gsi_channel *channel) } } -/* Stop channel activity. Transactions may not be allocated until thawed. */ -static void gsi_channel_freeze(struct gsi_channel *channel) -{ - gsi_channel_trans_quiesce(channel); - - napi_disable(&channel->napi); - - gsi_irq_ieob_disable(channel->gsi, channel->evt_ring_id); -} - -/* Allow transactions to be used on the channel again. 
*/ -static void gsi_channel_thaw(struct gsi_channel *channel) -{ - gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id); - - napi_enable(&channel->napi); -} - /* Program a channel for use */ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell) { @@ -843,34 +871,47 @@ static void gsi_channel_deprogram(struct gsi_channel *channel) /* Nothing to do */ } -/* Start an allocated GSI channel */ -int gsi_channel_start(struct gsi *gsi, u32 channel_id) +static int __gsi_channel_start(struct gsi_channel *channel, bool start) { - struct gsi_channel *channel = &gsi->channel[channel_id]; + struct gsi *gsi = channel->gsi; int ret; + if (!start) + return 0; + mutex_lock(&gsi->mutex); ret = gsi_channel_start_command(channel); mutex_unlock(&gsi->mutex); - gsi_channel_thaw(channel); - return ret; } -/* Stop a started channel */ -int gsi_channel_stop(struct gsi *gsi, u32 channel_id) +/* Start an allocated GSI channel */ +int gsi_channel_start(struct gsi *gsi, u32 channel_id) { struct gsi_channel *channel = &gsi->channel[channel_id]; - u32 retries; int ret; - gsi_channel_freeze(channel); + /* Enable NAPI and the completion interrupt */ + napi_enable(&channel->napi); + gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id); + + ret = __gsi_channel_start(channel, true); + if (ret) { + gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id); + napi_disable(&channel->napi); + } + + return ret; +} - /* RX channels might require a little time to enter STOPPED state */ - retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES; +static int gsi_channel_stop_retry(struct gsi_channel *channel) +{ + u32 retries = GSI_CHANNEL_STOP_RETRIES; + struct gsi *gsi = channel->gsi; + int ret; mutex_lock(&gsi->mutex); @@ -878,18 +919,46 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id) ret = gsi_channel_stop_command(channel); if (ret != -EAGAIN) break; - msleep(1); + usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC); } while (retries--); mutex_unlock(&gsi->mutex); - /* Thaw the channel if we need to retry (or on error) */ - if (ret) - gsi_channel_thaw(channel); + return ret; +} + +static int __gsi_channel_stop(struct gsi_channel *channel, bool stop) +{ + int ret; + + /* Wait for any underway transactions to complete before stopping. */ + gsi_channel_trans_quiesce(channel); + + ret = stop ? gsi_channel_stop_retry(channel) : 0; + /* Finally, ensure NAPI polling has finished. 
*/ + if (!ret) + napi_synchronize(&channel->napi); return ret; } +/* Stop a started channel */ +int gsi_channel_stop(struct gsi *gsi, u32 channel_id) +{ + struct gsi_channel *channel = &gsi->channel[channel_id]; + int ret; + + /* Only disable the completion interrupt if stop is successful */ + ret = __gsi_channel_stop(channel, true); + if (ret) + return ret; + + gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id); + napi_disable(&channel->napi); + + return 0; +} + /* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */ void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell) { @@ -913,12 +982,7 @@ int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop) { struct gsi_channel *channel = &gsi->channel[channel_id]; - if (stop) - return gsi_channel_stop(gsi, channel_id); - - gsi_channel_freeze(channel); - - return 0; + return __gsi_channel_stop(channel, stop); } /* Resume a suspended channel (starting will be requested if STOPPED) */ @@ -926,12 +990,7 @@ int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start) { struct gsi_channel *channel = &gsi->channel[channel_id]; - if (start) - return gsi_channel_start(gsi, channel_id); - - gsi_channel_thaw(channel); - - return 0; + return __gsi_channel_start(channel, start); } /** @@ -1178,6 +1237,7 @@ static void gsi_isr_ieob(struct gsi *gsi) u32 event_mask; event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET); + gsi_irq_ieob_disable(gsi, event_mask); iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET); while (event_mask) { @@ -1185,7 +1245,6 @@ static void gsi_isr_ieob(struct gsi *gsi) event_mask ^= BIT(evt_ring_id); - gsi_irq_ieob_disable(gsi, evt_ring_id); napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi); } } @@ -1373,7 +1432,7 @@ static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count) /* Hardware requires a 2^n ring size, with alignment equal to size */ ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL); if (ring->virt && addr % size) { - dma_free_coherent(dev, size, ring->virt, ring->addr); + dma_free_coherent(dev, size, ring->virt, addr); dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n", size); return -EINVAL; /* Not a good error value, but distinct */ @@ -1430,7 +1489,7 @@ void gsi_channel_doorbell(struct gsi_channel *channel) } /* Consult hardware, move any newly completed transactions to completed list */ -static void gsi_channel_update(struct gsi_channel *channel) +static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel) { u32 evt_ring_id = channel->evt_ring_id; struct gsi *gsi = channel->gsi; @@ -1449,7 +1508,7 @@ static void gsi_channel_update(struct gsi_channel *channel) offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id); index = gsi_ring_index(ring, ioread32(gsi->virt + offset)); if (index == ring->index % ring->count) - return; + return NULL; /* Get the transaction for the latest completed event. 
Take a * reference to keep it from completing before we give the events @@ -1474,6 +1533,8 @@ static void gsi_channel_update(struct gsi_channel *channel) gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index); gsi_trans_free(trans); + + return gsi_channel_trans_complete(channel); } /** @@ -1494,11 +1555,8 @@ static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel) /* Get the first transaction from the completed list */ trans = gsi_channel_trans_complete(channel); - if (!trans) { - /* List is empty; see if there's more to do */ - gsi_channel_update(channel); - trans = gsi_channel_trans_complete(channel); - } + if (!trans) /* List is empty; see if there's more to do */ + trans = gsi_channel_update(channel); if (trans) gsi_trans_move_polled(trans); @@ -1521,23 +1579,20 @@ static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel) static int gsi_channel_poll(struct napi_struct *napi, int budget) { struct gsi_channel *channel; - int count = 0; + int count; channel = container_of(napi, struct gsi_channel, napi); - while (count < budget) { + for (count = 0; count < budget; count++) { struct gsi_trans *trans; - count++; trans = gsi_channel_poll_one(channel); if (!trans) break; gsi_trans_complete(trans); } - if (count < budget) { - napi_complete(&channel->napi); - gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id); - } + if (count < budget && napi_complete(napi)) + gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id); return count; } @@ -1627,7 +1682,7 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id, enum gsi_generic_cmd_opcode opcode) { struct completion *completion = &gsi->completion; - bool success; + bool timeout; u32 val; /* The error global interrupt type is always enabled (until we @@ -1650,12 +1705,12 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id, val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK); val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK); - success = gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion); + timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion); /* Disable the GP_INT1 IRQ type again */ iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); - if (success) + if (!timeout) return gsi->result; dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n", diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h index 6c2371084c55..802077631371 100644 --- a/drivers/net/ipa/ipa.h +++ b/drivers/net/ipa/ipa.h @@ -43,7 +43,7 @@ enum ipa_flag { * @flags: Boolean state flags * @version: IPA hardware version * @pdev: Platform device - * @modem_rproc: Remoteproc handle for modem subsystem + * @completion: Used to signal pipeline clear transfer complete * @smp2p: SMP2P information * @clock: IPA clocking information * @table_addr: DMA address of filter/route table content @@ -83,7 +83,7 @@ struct ipa { DECLARE_BITMAP(flags, IPA_FLAG_COUNT); enum ipa_version version; struct platform_device *pdev; - struct rproc *modem_rproc; + struct completion completion; struct notifier_block nb; void *notifier; struct ipa_smp2p *smp2p; diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c index 135c393437f1..354675a643db 100644 --- a/drivers/net/ipa/ipa_clock.c +++ b/drivers/net/ipa/ipa_clock.c @@ -31,142 +31,154 @@ */ /** + * struct ipa_interconnect - IPA interconnect information + * @path: Interconnect path + * @average_bandwidth: Average interconnect bandwidth (KB/second) + * @peak_bandwidth: Peak interconnect bandwidth (KB/second) + 
*/ +struct ipa_interconnect { + struct icc_path *path; + u32 average_bandwidth; + u32 peak_bandwidth; +}; + +/** * struct ipa_clock - IPA clocking information * @count: Clocking reference count * @mutex: Protects clock enable/disable * @core: IPA core clock - * @memory_path: Memory interconnect - * @imem_path: Internal memory interconnect - * @config_path: Configuration space interconnect - * @interconnect_data: Interconnect configuration data + * @interconnect_count: Number of elements in interconnect[] + * @interconnect: Interconnect array */ struct ipa_clock { refcount_t count; struct mutex mutex; /* protects clock enable/disable */ struct clk *core; - struct icc_path *memory_path; - struct icc_path *imem_path; - struct icc_path *config_path; - const struct ipa_interconnect_data *interconnect_data; + u32 interconnect_count; + struct ipa_interconnect *interconnect; }; -static struct icc_path * -ipa_interconnect_init_one(struct device *dev, const char *name) +static int ipa_interconnect_init_one(struct device *dev, + struct ipa_interconnect *interconnect, + const struct ipa_interconnect_data *data) { struct icc_path *path; - path = of_icc_get(dev, name); - if (IS_ERR(path)) - dev_err(dev, "error %ld getting %s interconnect\n", - PTR_ERR(path), name); + path = of_icc_get(dev, data->name); + if (IS_ERR(path)) { + int ret = PTR_ERR(path); + + dev_err(dev, "error %d getting %s interconnect\n", ret, + data->name); - return path; + return ret; + } + + interconnect->path = path; + interconnect->average_bandwidth = data->average_bandwidth; + interconnect->peak_bandwidth = data->peak_bandwidth; + + return 0; } -/* Initialize interconnects required for IPA operation */ -static int ipa_interconnect_init(struct ipa_clock *clock, struct device *dev) +static void ipa_interconnect_exit_one(struct ipa_interconnect *interconnect) { - struct icc_path *path; - - path = ipa_interconnect_init_one(dev, "memory"); - if (IS_ERR(path)) - goto err_return; - clock->memory_path = path; + icc_put(interconnect->path); + memset(interconnect, 0, sizeof(*interconnect)); +} - path = ipa_interconnect_init_one(dev, "imem"); - if (IS_ERR(path)) - goto err_memory_path_put; - clock->imem_path = path; +/* Initialize interconnects required for IPA operation */ +static int ipa_interconnect_init(struct ipa_clock *clock, struct device *dev, + const struct ipa_interconnect_data *data) +{ + struct ipa_interconnect *interconnect; + u32 count; + int ret; - path = ipa_interconnect_init_one(dev, "config"); - if (IS_ERR(path)) - goto err_imem_path_put; - clock->config_path = path; + count = clock->interconnect_count; + interconnect = kcalloc(count, sizeof(*interconnect), GFP_KERNEL); + if (!interconnect) + return -ENOMEM; + clock->interconnect = interconnect; + + while (count--) { + ret = ipa_interconnect_init_one(dev, interconnect, data++); + if (ret) + goto out_unwind; + interconnect++; + } return 0; -err_imem_path_put: - icc_put(clock->imem_path); -err_memory_path_put: - icc_put(clock->memory_path); -err_return: - return PTR_ERR(path); +out_unwind: + while (interconnect-- > clock->interconnect) + ipa_interconnect_exit_one(interconnect); + kfree(clock->interconnect); + clock->interconnect = NULL; + + return ret; } /* Inverse of ipa_interconnect_init() */ static void ipa_interconnect_exit(struct ipa_clock *clock) { - icc_put(clock->config_path); - icc_put(clock->imem_path); - icc_put(clock->memory_path); + struct ipa_interconnect *interconnect; + + interconnect = clock->interconnect + clock->interconnect_count; + while (interconnect-- > 
clock->interconnect) + ipa_interconnect_exit_one(interconnect); + kfree(clock->interconnect); + clock->interconnect = NULL; } /* Currently we only use one bandwidth level, so just "enable" interconnects */ static int ipa_interconnect_enable(struct ipa *ipa) { - const struct ipa_interconnect_data *data; + struct ipa_interconnect *interconnect; struct ipa_clock *clock = ipa->clock; int ret; - - data = &clock->interconnect_data[IPA_INTERCONNECT_MEMORY]; - ret = icc_set_bw(clock->memory_path, data->average_rate, - data->peak_rate); - if (ret) - return ret; - - data = &clock->interconnect_data[IPA_INTERCONNECT_IMEM]; - ret = icc_set_bw(clock->imem_path, data->average_rate, - data->peak_rate); - if (ret) - goto err_memory_path_disable; - - data = &clock->interconnect_data[IPA_INTERCONNECT_CONFIG]; - ret = icc_set_bw(clock->config_path, data->average_rate, - data->peak_rate); - if (ret) - goto err_imem_path_disable; + u32 i; + + interconnect = clock->interconnect; + for (i = 0; i < clock->interconnect_count; i++) { + ret = icc_set_bw(interconnect->path, + interconnect->average_bandwidth, + interconnect->peak_bandwidth); + if (ret) + goto out_unwind; + interconnect++; + } return 0; -err_imem_path_disable: - (void)icc_set_bw(clock->imem_path, 0, 0); -err_memory_path_disable: - (void)icc_set_bw(clock->memory_path, 0, 0); +out_unwind: + while (interconnect-- > clock->interconnect) + (void)icc_set_bw(interconnect->path, 0, 0); return ret; } /* To disable an interconnect, we just its bandwidth to 0 */ -static int ipa_interconnect_disable(struct ipa *ipa) +static void ipa_interconnect_disable(struct ipa *ipa) { - const struct ipa_interconnect_data *data; + struct ipa_interconnect *interconnect; struct ipa_clock *clock = ipa->clock; + int result = 0; + u32 count; int ret; - ret = icc_set_bw(clock->memory_path, 0, 0); - if (ret) - return ret; - - ret = icc_set_bw(clock->imem_path, 0, 0); - if (ret) - goto err_memory_path_reenable; - - ret = icc_set_bw(clock->config_path, 0, 0); - if (ret) - goto err_imem_path_reenable; - - return 0; - -err_imem_path_reenable: - data = &clock->interconnect_data[IPA_INTERCONNECT_IMEM]; - (void)icc_set_bw(clock->imem_path, data->average_rate, - data->peak_rate); -err_memory_path_reenable: - data = &clock->interconnect_data[IPA_INTERCONNECT_MEMORY]; - (void)icc_set_bw(clock->memory_path, data->average_rate, - data->peak_rate); + count = clock->interconnect_count; + interconnect = clock->interconnect + count; + while (count--) { + interconnect--; + ret = icc_set_bw(interconnect->path, 0, 0); + if (ret && !result) + result = ret; + } - return ret; + if (result) + dev_err(&ipa->pdev->dev, + "error %d disabling IPA interconnects\n", ret); } /* Turn on IPA clocks, including interconnects */ @@ -189,7 +201,7 @@ static int ipa_clock_enable(struct ipa *ipa) static void ipa_clock_disable(struct ipa *ipa) { clk_disable_unprepare(ipa->clock->core); - (void)ipa_interconnect_disable(ipa); + ipa_interconnect_disable(ipa); } /* Get an IPA clock reference, but only if the reference count is @@ -286,9 +298,9 @@ ipa_clock_init(struct device *dev, const struct ipa_clock_data *data) goto err_clk_put; } clock->core = clk; - clock->interconnect_data = data->interconnect; + clock->interconnect_count = data->interconnect_count; - ret = ipa_interconnect_init(clock, dev); + ret = ipa_interconnect_init(clock, dev, data->interconnect_data); if (ret) goto err_kfree; diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c index 002e51448510..97b50fee6008 100644 --- a/drivers/net/ipa/ipa_cmd.c 
+++ b/drivers/net/ipa/ipa_cmd.c @@ -529,7 +529,7 @@ void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size, direction, opcode); } -static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans, u64 tag) +static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans) { struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS; @@ -543,14 +543,14 @@ static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans, u64 tag) cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr); payload = &cmd_payload->ip_packet_tag_status; - payload->tag = u64_encode_bits(tag, IP_PACKET_TAG_STATUS_TAG_FMASK); + payload->tag = le64_encode_bits(0, IP_PACKET_TAG_STATUS_TAG_FMASK); gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr, direction, opcode); } /* Issue a small command TX data transfer */ -static void ipa_cmd_transfer_add(struct gsi_trans *trans, u16 size) +static void ipa_cmd_transfer_add(struct gsi_trans *trans) { struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); enum dma_data_direction direction = DMA_TO_DEVICE; @@ -558,8 +558,6 @@ static void ipa_cmd_transfer_add(struct gsi_trans *trans, u16 size) union ipa_cmd_payload *payload; dma_addr_t payload_addr; - /* assert(size <= sizeof(*payload)); */ - /* Just transfer a zero-filled payload structure */ payload = ipa_cmd_payload_alloc(ipa, &payload_addr); @@ -567,34 +565,53 @@ static void ipa_cmd_transfer_add(struct gsi_trans *trans, u16 size) direction, opcode); } -void ipa_cmd_tag_process_add(struct gsi_trans *trans) +/* Add immediate commands to a transaction to clear the hardware pipeline */ +void ipa_cmd_pipeline_clear_add(struct gsi_trans *trans) { struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); struct ipa_endpoint *endpoint; - endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]; + /* This will complete when the transfer is received */ + reinit_completion(&ipa->completion); + /* Issue a no-op register write command (mask 0 means no write) */ ipa_cmd_register_write_add(trans, 0, 0, 0, true); + + /* Send a data packet through the IPA pipeline. The packet_init + * command says to send the next packet directly to the exception + * endpoint without any other IPA processing. The tag_status + * command requests that status be generated on completion of + * that transfer, and that it will be tagged with a value. + * Finally, the transfer command sends a small packet of data + * (instead of a command) using the command endpoint. 
+ */ + endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]; ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id); - ipa_cmd_ip_tag_status_add(trans, 0xcba987654321); - ipa_cmd_transfer_add(trans, 4); + ipa_cmd_ip_tag_status_add(trans); + ipa_cmd_transfer_add(trans); } -/* Returns the number of commands required for the tag process */ -u32 ipa_cmd_tag_process_count(void) +/* Returns the number of commands required to clear the pipeline */ +u32 ipa_cmd_pipeline_clear_count(void) { return 4; } -void ipa_cmd_tag_process(struct ipa *ipa) +void ipa_cmd_pipeline_clear_wait(struct ipa *ipa) +{ + wait_for_completion(&ipa->completion); +} + +void ipa_cmd_pipeline_clear(struct ipa *ipa) { - u32 count = ipa_cmd_tag_process_count(); + u32 count = ipa_cmd_pipeline_clear_count(); struct gsi_trans *trans; trans = ipa_cmd_trans_alloc(ipa, count); if (trans) { - ipa_cmd_tag_process_add(trans); + ipa_cmd_pipeline_clear_add(trans); gsi_trans_commit_wait(trans); + ipa_cmd_pipeline_clear_wait(ipa); } else { dev_err(&ipa->pdev->dev, "error allocating %u entry tag transaction\n", count); diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h index 4ed09c486abc..6dd3d35cf315 100644 --- a/drivers/net/ipa/ipa_cmd.h +++ b/drivers/net/ipa/ipa_cmd.h @@ -157,26 +157,30 @@ void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size, dma_addr_t addr, bool toward_ipa); /** - * ipa_cmd_tag_process_add() - Add IPA tag process commands to a transaction + * ipa_cmd_pipeline_clear_add() - Add pipeline clear commands to a transaction * @trans: GSI transaction */ -void ipa_cmd_tag_process_add(struct gsi_trans *trans); +void ipa_cmd_pipeline_clear_add(struct gsi_trans *trans); /** - * ipa_cmd_tag_process_add_count() - Number of commands in a tag process + * ipa_cmd_pipeline_clear_count() - # commands required to clear pipeline * * Return: The number of elements to allocate in a transaction - * to hold tag process commands + * to hold commands to clear the pipeline */ -u32 ipa_cmd_tag_process_count(void); +u32 ipa_cmd_pipeline_clear_count(void); /** - * ipa_cmd_tag_process() - Perform a tag process - * - * @Return: The number of elements to allocate in a transaction - * to hold tag process commands + * ipa_cmd_pipeline_clear_wait() - Wait pipeline clear to complete + * @ipa: - IPA pointer + */ +void ipa_cmd_pipeline_clear_wait(struct ipa *ipa); + +/** + * ipa_cmd_pipeline_clear() - Clear the hardware pipeline + * @ipa: - IPA pointer */ -void ipa_cmd_tag_process(struct ipa *ipa); +void ipa_cmd_pipeline_clear(struct ipa *ipa); /** * ipa_cmd_trans_alloc() - Allocate a transaction for the command TX endpoint diff --git a/drivers/net/ipa/ipa_data-sc7180.c b/drivers/net/ipa/ipa_data-sc7180.c index 5cc0ed77edb9..997b51ceb7d7 100644 --- a/drivers/net/ipa/ipa_data-sc7180.c +++ b/drivers/net/ipa/ipa_data-sc7180.c @@ -309,24 +309,30 @@ static struct ipa_mem_data ipa_mem_data = { .smem_size = 0x00002000, }; +/* Interconnect bandwidths are in 1000 byte/second units */ +static struct ipa_interconnect_data ipa_interconnect_data[] = { + { + .name = "memory", + .peak_bandwidth = 465000, /* 465 MBps */ + .average_bandwidth = 80000, /* 80 MBps */ + }, + /* Average bandwidth is unused for the next two interconnects */ + { + .name = "imem", + .peak_bandwidth = 68570, /* 68.570 MBps */ + .average_bandwidth = 0, /* unused */ + }, + { + .name = "config", + .peak_bandwidth = 30000, /* 30 MBps */ + .average_bandwidth = 0, /* unused */ + }, +}; + static struct ipa_clock_data ipa_clock_data = { .core_clock_rate = 100 * 1000 * 
1000, /* Hz */ - /* Interconnect rates are in 1000 byte/second units */ - .interconnect = { - [IPA_INTERCONNECT_MEMORY] = { - .peak_rate = 465000, /* 465 MBps */ - .average_rate = 80000, /* 80 MBps */ - }, - /* Average rate is unused for the next two interconnects */ - [IPA_INTERCONNECT_IMEM] = { - .peak_rate = 68570, /* 68.570 MBps */ - .average_rate = 0, /* unused */ - }, - [IPA_INTERCONNECT_CONFIG] = { - .peak_rate = 30000, /* 30 MBps */ - .average_rate = 0, /* unused */ - }, - }, + .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), + .interconnect_data = ipa_interconnect_data, }; /* Configuration data for the SC7180 SoC. */ diff --git a/drivers/net/ipa/ipa_data-sdm845.c b/drivers/net/ipa/ipa_data-sdm845.c index f8fee8d3ca42..88c9c3562ab7 100644 --- a/drivers/net/ipa/ipa_data-sdm845.c +++ b/drivers/net/ipa/ipa_data-sdm845.c @@ -329,24 +329,30 @@ static struct ipa_mem_data ipa_mem_data = { .smem_size = 0x00002000, }; +/* Interconnect bandwidths are in 1000 byte/second units */ +static struct ipa_interconnect_data ipa_interconnect_data[] = { + { + .name = "memory", + .peak_bandwidth = 600000, /* 600 MBps */ + .average_bandwidth = 80000, /* 80 MBps */ + }, + /* Average bandwidth is unused for the next two interconnects */ + { + .name = "imem", + .peak_bandwidth = 350000, /* 350 MBps */ + .average_bandwidth = 0, /* unused */ + }, + { + .name = "config", + .peak_bandwidth = 40000, /* 40 MBps */ + .average_bandwidth = 0, /* unused */ + }, +}; + static struct ipa_clock_data ipa_clock_data = { .core_clock_rate = 75 * 1000 * 1000, /* Hz */ - /* Interconnect rates are in 1000 byte/second units */ - .interconnect = { - [IPA_INTERCONNECT_MEMORY] = { - .peak_rate = 600000, /* 600 MBps */ - .average_rate = 80000, /* 80 MBps */ - }, - /* Average rate is unused for the next two interconnects */ - [IPA_INTERCONNECT_IMEM] = { - .peak_rate = 350000, /* 350 MBps */ - .average_rate = 0, /* unused */ - }, - [IPA_INTERCONNECT_CONFIG] = { - .peak_rate = 40000, /* 40 MBps */ - .average_rate = 0, /* unused */ - }, - }, + .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), + .interconnect_data = ipa_interconnect_data, }; /* Configuration data for the SDM845 SoC. 
*/ diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h index 0ed5ffe2b8da..b476fc373f7f 100644 --- a/drivers/net/ipa/ipa_data.h +++ b/drivers/net/ipa/ipa_data.h @@ -258,32 +258,28 @@ struct ipa_mem_data { u32 smem_size; }; -/** enum ipa_interconnect_id - IPA interconnect identifier */ -enum ipa_interconnect_id { - IPA_INTERCONNECT_MEMORY, - IPA_INTERCONNECT_IMEM, - IPA_INTERCONNECT_CONFIG, - IPA_INTERCONNECT_COUNT, /* Last; not an interconnect */ -}; - /** - * struct ipa_interconnect_data - description of IPA interconnect rates - * @peak_rate: Peak interconnect bandwidth (in 1000 byte/sec units) - * @average_rate: Average interconnect bandwidth (in 1000 byte/sec units) + * struct ipa_interconnect_data - description of IPA interconnect bandwidths + * @name: Interconnect name (matches interconnect-name in DT) + * @peak_bandwidth: Peak interconnect bandwidth (in 1000 byte/sec units) + * @average_bandwidth: Average interconnect bandwidth (in 1000 byte/sec units) */ struct ipa_interconnect_data { - u32 peak_rate; - u32 average_rate; + const char *name; + u32 peak_bandwidth; + u32 average_bandwidth; }; /** * struct ipa_clock_data - description of IPA clock and interconnect rates * @core_clock_rate: Core clock rate (Hz) - * @interconnect: Array of interconnect bandwidth parameters + * @interconnect_count: Number of entries in the interconnect_data array + * @interconnect_data: IPA interconnect configuration data */ struct ipa_clock_data { u32 core_clock_rate; - struct ipa_interconnect_data interconnect[IPA_INTERCONNECT_COUNT]; + u32 interconnect_count; /* # entries in interconnect_data[] */ + const struct ipa_interconnect_data *interconnect_data; }; /** diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 9f4be9812a1f..7a46c790afbe 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -69,8 +69,11 @@ struct ipa_status { }; /* Field masks for struct ipa_status structure fields */ +#define IPA_STATUS_MASK_TAG_VALID_FMASK GENMASK(4, 4) +#define IPA_STATUS_SRC_IDX_FMASK GENMASK(4, 0) #define IPA_STATUS_DST_IDX_FMASK GENMASK(4, 0) #define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22) +#define IPA_STATUS_FLAGS2_TAG_FMASK GENMASK_ULL(63, 16) #ifdef IPA_VALIDATE @@ -399,7 +402,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa) * That won't happen, and we could be more precise, but this is fine * for now. We need to end the transaction with a "tag process." 
*/ - count = hweight32(initialized) + ipa_cmd_tag_process_count(); + count = hweight32(initialized) + ipa_cmd_pipeline_clear_count(); trans = ipa_cmd_trans_alloc(ipa, count); if (!trans) { dev_err(&ipa->pdev->dev, @@ -428,11 +431,13 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa) ipa_cmd_register_write_add(trans, offset, 0, ~0, false); } - ipa_cmd_tag_process_add(trans); + ipa_cmd_pipeline_clear_add(trans); /* XXX This should have a 1 second timeout */ gsi_trans_commit_wait(trans); + ipa_cmd_pipeline_clear_wait(ipa); + return 0; } @@ -588,7 +593,7 @@ static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint) /* Note that HDR_ENDIANNESS indicates big endian header fields */ if (endpoint->data->qmap) - val = cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK); + val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK); iowrite32(val, endpoint->ipa->reg_virt + offset); } @@ -1164,19 +1169,53 @@ static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint, return true; if (!status->pkt_len) return true; - endpoint_id = u32_get_bits(status->endp_dst_idx, - IPA_STATUS_DST_IDX_FMASK); + endpoint_id = u8_get_bits(status->endp_dst_idx, + IPA_STATUS_DST_IDX_FMASK); if (endpoint_id != endpoint->endpoint_id) return true; return false; /* Don't skip this packet, process it */ } +static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint, + const struct ipa_status *status) +{ + struct ipa_endpoint *command_endpoint; + struct ipa *ipa = endpoint->ipa; + u32 endpoint_id; + + if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK)) + return false; /* No valid tag */ + + /* The status contains a valid tag. We know the packet was sent to + * this endpoint (already verified by ipa_endpoint_status_skip()). + * If the packet came from the AP->command TX endpoint we know + * this packet was sent as part of the pipeline clear process. + */ + endpoint_id = u8_get_bits(status->endp_src_idx, + IPA_STATUS_SRC_IDX_FMASK); + command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]; + if (endpoint_id == command_endpoint->endpoint_id) { + complete(&ipa->completion); + } else { + dev_err(&ipa->pdev->dev, + "unexpected tagged packet from endpoint %u\n", + endpoint_id); + } + + return true; +} + /* Return whether the status indicates the packet should be dropped */ -static bool ipa_status_drop_packet(const struct ipa_status *status) +static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint, + const struct ipa_status *status) { u32 val; + /* If the status indicates a tagged transfer, we'll drop the packet */ + if (ipa_endpoint_status_tag(endpoint, status)) + return true; + /* Deaggregation exceptions we drop; all other types we consume */ if (status->exception) return status->exception == IPA_STATUS_EXCEPTION_DEAGGR; @@ -1213,12 +1252,11 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint, continue; } - /* Compute the amount of buffer space consumed by the - * packet, including the status element. If the hardware - * is configured to pad packet data to an aligned boundary, - * account for that. And if checksum offload is is enabled - * a trailer containing computed checksum information will - * be appended. + /* Compute the amount of buffer space consumed by the packet, + * including the status element. If the hardware is configured + * to pad packet data to an aligned boundary, account for that. + * And if checksum offload is enabled a trailer containing + * computed checksum information will be appended. 
*/ align = endpoint->data->rx.pad_align ? : 1; len = le16_to_cpu(status->pkt_len); @@ -1226,16 +1264,21 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint, if (endpoint->data->checksum) len += sizeof(struct rmnet_map_dl_csum_trailer); - /* Charge the new packet with a proportional fraction of - * the unused space in the original receive buffer. - * XXX Charge a proportion of the *whole* receive buffer? - */ - if (!ipa_status_drop_packet(status)) { - u32 extra = unused * len / total_len; - void *data2 = data + sizeof(*status); - u32 len2 = le16_to_cpu(status->pkt_len); + if (!ipa_endpoint_status_drop(endpoint, status)) { + void *data2; + u32 extra; + u32 len2; /* Client receives only packet data (no status) */ + data2 = data + sizeof(*status); + len2 = le16_to_cpu(status->pkt_len); + + /* Have the true size reflect the extra unused space in + * the original receive buffer. Distribute the "cost" + * proportionately across all aggregated packets in the + * buffer. + */ + extra = DIV_ROUND_CLOSEST(unused * len, total_len); ipa_endpoint_skb_copy(endpoint, data2, len2, extra); } @@ -1378,7 +1421,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint) do { if (!ipa_endpoint_aggr_active(endpoint)) break; - msleep(1); + usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); } while (retries--); /* Check one last time */ @@ -1399,7 +1442,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint) */ gsi_channel_reset(gsi, endpoint->channel_id, true); - msleep(1); + usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); goto out_suspend_again; @@ -1564,7 +1607,7 @@ void ipa_endpoint_suspend(struct ipa *ipa) if (ipa->modem_netdev) ipa_modem_suspend(ipa->modem_netdev); - ipa_cmd_tag_process(ipa); + ipa_cmd_pipeline_clear(ipa); ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c index 84bb8ae92725..c10e7340b031 100644 --- a/drivers/net/ipa/ipa_main.c +++ b/drivers/net/ipa/ipa_main.c @@ -15,7 +15,6 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_address.h> -#include <linux/remoteproc.h> #include <linux/qcom_scm.h> #include <linux/soc/qcom/mdt_loader.h> @@ -729,19 +728,6 @@ static const struct of_device_id ipa_match[] = { }; MODULE_DEVICE_TABLE(of, ipa_match); -static phandle of_property_read_phandle(const struct device_node *np, - const char *name) -{ - struct property *prop; - int len = 0; - - prop = of_find_property(np, name, &len); - if (!prop || len != sizeof(__be32)) - return 0; - - return be32_to_cpup(prop->value); -} - /* Check things that can be validated at build time. This just * groups these things BUILD_BUG_ON() calls don't clutter the rest * of the code. 
@@ -807,10 +793,8 @@ static int ipa_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; const struct ipa_data *data; struct ipa_clock *clock; - struct rproc *rproc; bool modem_init; struct ipa *ipa; - phandle ph; int ret; ipa_validate_build(); @@ -829,25 +813,12 @@ static int ipa_probe(struct platform_device *pdev) if (!qcom_scm_is_available()) return -EPROBE_DEFER; - /* We rely on remoteproc to tell us about modem state changes */ - ph = of_property_read_phandle(dev->of_node, "modem-remoteproc"); - if (!ph) { - dev_err(dev, "DT missing \"modem-remoteproc\" property\n"); - return -EINVAL; - } - - rproc = rproc_get_by_phandle(ph); - if (!rproc) - return -EPROBE_DEFER; - /* The clock and interconnects might not be ready when we're * probed, so might return -EPROBE_DEFER. */ clock = ipa_clock_init(dev, data->clock_data); - if (IS_ERR(clock)) { - ret = PTR_ERR(clock); - goto err_rproc_put; - } + if (IS_ERR(clock)) + return PTR_ERR(clock); /* No more EPROBE_DEFER. Allocate and initialize the IPA structure */ ipa = kzalloc(sizeof(*ipa), GFP_KERNEL); @@ -858,9 +829,9 @@ static int ipa_probe(struct platform_device *pdev) ipa->pdev = pdev; dev_set_drvdata(dev, ipa); - ipa->modem_rproc = rproc; ipa->clock = clock; ipa->version = data->version; + init_completion(&ipa->completion); ret = ipa_reg_init(ipa); if (ret) @@ -935,8 +906,6 @@ err_kfree_ipa: kfree(ipa); err_clock_exit: ipa_clock_exit(clock); -err_rproc_put: - rproc_put(rproc); return ret; } @@ -944,7 +913,6 @@ err_rproc_put: static int ipa_remove(struct platform_device *pdev) { struct ipa *ipa = dev_get_drvdata(&pdev->dev); - struct rproc *rproc = ipa->modem_rproc; struct ipa_clock *clock = ipa->clock; int ret; @@ -970,7 +938,6 @@ static int ipa_remove(struct platform_device *pdev) ipa_reg_exit(ipa); kfree(ipa); ipa_clock_exit(clock); - rproc_put(rproc); return 0; } diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c index 0cc3a3374caa..f25029b9ec85 100644 --- a/drivers/net/ipa/ipa_mem.c +++ b/drivers/net/ipa/ipa_mem.c @@ -336,7 +336,7 @@ static void ipa_imem_exit(struct ipa *ipa) size = iommu_unmap(domain, ipa->imem_iova, ipa->imem_size); if (size != ipa->imem_size) - dev_warn(dev, "unmapped %zu IMEM bytes, expected %lu\n", + dev_warn(dev, "unmapped %zu IMEM bytes, expected %zu\n", size, ipa->imem_size); } else { dev_err(dev, "couldn't get IPA IOMMU domain for IMEM\n"); } @@ -440,7 +440,7 @@ static void ipa_smem_exit(struct ipa *ipa) size = iommu_unmap(domain, ipa->smem_iova, ipa->smem_size); if (size != ipa->smem_size) - dev_warn(dev, "unmapped %zu SMEM bytes, expected %lu\n", + dev_warn(dev, "unmapped %zu SMEM bytes, expected %zu\n", size, ipa->smem_size); } else { diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c index e34fe2d77324..9b08eb823984 100644 --- a/drivers/net/ipa/ipa_modem.c +++ b/drivers/net/ipa/ipa_modem.c @@ -216,6 +216,7 @@ int ipa_modem_start(struct ipa *ipa) ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev; ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev; + SET_NETDEV_DEV(netdev, &ipa->pdev->dev); priv = netdev_priv(netdev); priv->ipa = ipa;
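
Two more of the patterns above, assembled into their post-patch form for reference. First, the IEOB interrupt handshake in gsi.c: the ISR now disables the IEOB interrupts for every event ring that fired (as a mask, in a single register write) before acknowledging them and scheduling NAPI, and the poll function re-enables a channel's interrupt only when napi_complete() confirms polling has truly finished. One context line the hunks elide — computing evt_ring_id with __ffs() — is filled in below and should be read as an assumption about the surrounding code:

static void gsi_isr_ieob(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
	/* Disable the fired interrupts before acknowledging them */
	gsi_irq_ieob_disable(gsi, event_mask);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);	/* assumed context line */

		event_mask ^= BIT(evt_ring_id);

		napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
	}
}

static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
	struct gsi_channel *channel;
	int count;

	channel = container_of(napi, struct gsi_channel, napi);
	for (count = 0; count < budget; count++) {
		struct gsi_trans *trans;

		trans = gsi_channel_poll_one(channel);
		if (!trans)
			break;
		gsi_trans_complete(trans);
	}

	/* napi_complete() returns false if polling gets rescheduled;
	 * only re-enable the interrupt once polling is really done.
	 */
	if (count < budget && napi_complete(napi))
		gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id);

	return count;
}

Disabling per-mask in the ISR and gating the re-enable on napi_complete()'s return value replaces the old per-ring disable inside the ISR loop, where the interrupt could be re-enabled while polling was still pending.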
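
Second, the ipa_clock.c rework replaces three hard-coded interconnect paths (memory, imem, config) with a data-driven array, so each SoC's ipa_data file just supplies a table and a count. Setup walks the array forward, and any failure unwinds backward over the entries already enabled. The post-patch enable path, assembled from the hunks above (icc_set_bw() is the kernel interconnect API):

static int ipa_interconnect_enable(struct ipa *ipa)
{
	struct ipa_interconnect *interconnect;
	struct ipa_clock *clock = ipa->clock;
	int ret;
	u32 i;

	interconnect = clock->interconnect;
	for (i = 0; i < clock->interconnect_count; i++) {
		/* Vote for the bandwidths recorded at init time */
		ret = icc_set_bw(interconnect->path,
				 interconnect->average_bandwidth,
				 interconnect->peak_bandwidth);
		if (ret)
			goto out_unwind;
		interconnect++;
	}

	return 0;

out_unwind:
	/* Drop the votes already cast, walking back to the array start */
	while (interconnect-- > clock->interconnect)
		(void)icc_set_bw(interconnect->path, 0, 0);

	return ret;
}

The same backward-walking idiom appears in ipa_interconnect_init() and ipa_interconnect_exit(), which is why supporting an additional interconnect on a new SoC now only requires another entry in that SoC's ipa_interconnect_data[] table.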