From 809b1b04df898b6d182069146231a3cbf5f2d9cc Mon Sep 17 00:00:00 2001
From: Robin Gong
Date: Wed, 17 Jun 2020 06:42:08 +0800
Subject: spi: introduce fallback to pio

Add a fallback to PIO mode in case a DMA transfer fails with the error
status SPI_TRANS_FAIL_NO_START.  If a controller driver wants to enable
this feature, it should set xfer->error at the appropriate point, for
example when dmaengine_prep_slave_sg() fails (i.e. before any data has
been put on the SPI bus).  In addition, the driver should check
master->fallback in its can_dma() callback so that the SPI core can
switch to PIO for the retry.  Please refer to spi-imx.c.

Signed-off-by: Robin Gong
Link: https://lore.kernel.org/r/1592347329-28363-2-git-send-email-yibin.gong@nxp.com
Signed-off-by: Mark Brown
---
 drivers/spi/spi.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

(limited to 'drivers/spi/spi.c')

diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 8158e281f354..6fa56590bba2 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -982,6 +982,8 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
 		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
 	}
 
+	ctlr->cur_msg_mapped = false;
+
 	return 0;
 }
 #else /* !CONFIG_HAS_DMA */
@@ -1234,8 +1236,17 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
 		if (xfer->tx_buf || xfer->rx_buf) {
 			reinit_completion(&ctlr->xfer_completion);
 
+fallback_pio:
 			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
 			if (ret < 0) {
+				if (ctlr->cur_msg_mapped &&
+				   (xfer->error & SPI_TRANS_FAIL_NO_START)) {
+					__spi_unmap_msg(ctlr, msg);
+					ctlr->fallback = true;
+					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
+					goto fallback_pio;
+				}
+
 				SPI_STATISTICS_INCREMENT_FIELD(statm,
 							       errors);
 				SPI_STATISTICS_INCREMENT_FIELD(stats,
@@ -1693,6 +1704,7 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
 	spin_lock_irqsave(&ctlr->queue_lock, flags);
 	ctlr->cur_msg = NULL;
 	ctlr->cur_msg_prepared = false;
+	ctlr->fallback = false;
 	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

-- cgit
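A minimal sketch of the driver-side wiring the patch above expects, loosely modelled on spi-imx.c rather than taken from it: the foo_* names, the FOO_DMA_MIN_BYTES threshold and the tx-only DMA prep are invented for illustration, and xfer->error / SPI_TRANS_FAIL_NO_START come from the header change elsewhere in this series.

#include <linux/dmaengine.h>
#include <linux/spi/spi.h>

#define FOO_DMA_MIN_BYTES	16	/* invented threshold */

static bool foo_spi_can_dma(struct spi_controller *ctlr,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	/* Once the core has fallen back, stay on PIO for the retry */
	if (!ctlr->dma_tx || ctlr->fallback)
		return false;

	return xfer->len >= FOO_DMA_MIN_BYTES;
}

static int foo_spi_dma_transfer(struct spi_controller *ctlr,
				struct spi_transfer *xfer)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_sg(ctlr->dma_tx, xfer->tx_sg.sgl,
				       xfer->tx_sg.nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		/*
		 * Nothing has been put on the bus yet, so tell the core
		 * that a PIO retry of this transfer is safe.
		 */
		xfer->error |= SPI_TRANS_FAIL_NO_START;
		return -EINVAL;
	}

	/* ... submit the descriptor, start the DMA, wait for completion ... */
	return 0;
}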
From d40f0b6f2e21f2400ae8b1b120d11877d9ffd8ec Mon Sep 17 00:00:00 2001
From: Douglas Anderson
Date: Mon, 29 Jun 2020 16:41:06 -0700
Subject: spi: Avoid setting the chip select if we don't need to

On some SPI controllers (like spi-geni-qcom) setting the chip select
is a heavy operation.  For instance on spi-geni-qcom, with the current
code, it was measured as taking upwards of 20 us.  Even on SPI
controllers that aren't as heavy, setting the chip select is at least
something like an MMIO operation over some peripheral bus, which isn't
as fast as a RAM access.

While it would be good to find ways to mitigate problems like this in
the drivers for those SPI controllers, the SPI framework can also help
out.  Specifically, in some situations the framework calls the driver's
set_cs() with the same parameter several times in a row.  This is
specifically observed with the Chrome OS EC SPI driver (cros_ec_spi),
but other drivers likely trip it to some extent.

Let's solve this by caching the chip select state in the core and only
calling into the controller if there was a change.  We check not only
the "enable" state but also the chip select mode (active high or active
low), since controllers may care about both the mode and the enable
flag in their callback.

Signed-off-by: Douglas Anderson
Link: https://lore.kernel.org/r/20200629164103.1.Ied8e8ad8bbb2df7f947e3bc5ea1c315e041785a2@changeid
Signed-off-by: Mark Brown
---
 drivers/spi/spi.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

(limited to 'drivers/spi/spi.c')

diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 6fa56590bba2..d4ba723a30da 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -778,6 +778,17 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
 {
 	bool enable1 = enable;
 
+	/*
+	 * Avoid calling into the driver (or doing delays) if the chip select
+	 * isn't actually changing from the last time this was called.
+	 */
+	if ((spi->controller->last_cs_enable == enable) &&
+	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
+		return;
+
+	spi->controller->last_cs_enable = enable;
+	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
+
 	if (!spi->controller->set_cs_timing) {
 		if (enable1)
 			spi_delay_exec(&spi->controller->cs_setup, NULL);

-- cgit
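A sketch of the call pattern the new cache helps with, in the spirit of cros_ec_spi but not code from that driver; foo_ec_command() and its buffers are invented and assumed to be DMA-safe. With cs_change set on the last transfer, chip select stays asserted after the first message, so the assert at the start of the second message no longer reaches the controller's set_cs() callback.

#include <linux/spi/spi.h>

static int foo_ec_command(struct spi_device *spi,
			  void *cmd, size_t cmd_len,
			  void *resp, size_t resp_len)
{
	struct spi_transfer xfer_cmd = {
		.tx_buf = cmd,
		.len = cmd_len,
		.cs_change = 1,	/* last transfer: keep CS asserted */
	};
	struct spi_transfer xfer_resp = {
		.rx_buf = resp,
		.len = resp_len,
	};
	struct spi_message m1, m2;
	int ret;

	spi_message_init_with_transfers(&m1, &xfer_cmd, 1);
	spi_message_init_with_transfers(&m2, &xfer_resp, 1);

	ret = spi_sync(spi, &m1);	/* asserts CS and leaves it asserted */
	if (ret)
		return ret;

	/* The core's second "assert CS" is now skipped as a no-op */
	return spi_sync(spi, &m2);
}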
From 60a883d119ab9ef63f830c85bbd2f0e2e2314f4f Mon Sep 17 00:00:00 2001
From: Marek Szyprowski
Date: Thu, 9 Jul 2020 08:50:07 +0200
Subject: spi: use kthread_create_worker() helper

Use the kthread_create_worker() helper to simplify the code.  It uses
the kthread worker API the right way.  It will eventually allow
removing the FIXME in kthread_worker_fn() and adding more consistency
checks in the future.

Signed-off-by: Marek Szyprowski
Reviewed-by: Petr Mladek
Link: https://lore.kernel.org/r/20200709065007.26896-1-m.szyprowski@samsung.com
Signed-off-by: Mark Brown
---
 drivers/spi/spi.c | 26 ++++++++++++--------------
 1 file changed, 12 insertions(+), 14 deletions(-)

(limited to 'drivers/spi/spi.c')

diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index d4ba723a30da..1d7bba434225 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1368,7 +1368,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 
 	/* If another context is idling the device then defer */
 	if (ctlr->idling) {
-		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 		return;
 	}
@@ -1382,7 +1382,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 
 		/* Only do teardown in the thread */
 		if (!in_kthread) {
-			kthread_queue_work(&ctlr->kworker,
+			kthread_queue_work(ctlr->kworker,
 					   &ctlr->pump_messages);
 			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 			return;
@@ -1618,7 +1618,7 @@ static void spi_set_thread_rt(struct spi_controller *ctlr)
 
 	dev_info(&ctlr->dev,
 		"will run message pump with realtime priority\n");
-	sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param);
+	sched_setscheduler(ctlr->kworker->task, SCHED_FIFO, &param);
 }
 
 static int spi_init_queue(struct spi_controller *ctlr)
@@ -1626,13 +1626,12 @@ static int spi_init_queue(struct spi_controller *ctlr)
 	ctlr->running = false;
 	ctlr->busy = false;
 
-	kthread_init_worker(&ctlr->kworker);
-	ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
-					 "%s", dev_name(&ctlr->dev));
-	if (IS_ERR(ctlr->kworker_task)) {
-		dev_err(&ctlr->dev, "failed to create message pump task\n");
-		return PTR_ERR(ctlr->kworker_task);
+	ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
+	if (IS_ERR(ctlr->kworker)) {
+		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
+		return PTR_ERR(ctlr->kworker);
 	}
+
 	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
 
 	/*
@@ -1716,7 +1715,7 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
 	ctlr->cur_msg = NULL;
 	ctlr->cur_msg_prepared = false;
 	ctlr->fallback = false;
-	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
 	trace_spi_message_done(mesg);
@@ -1742,7 +1741,7 @@ static int spi_start_queue(struct spi_controller *ctlr)
 	ctlr->cur_msg = NULL;
 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
-	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
 
 	return 0;
 }
@@ -1798,8 +1797,7 @@ static int spi_destroy_queue(struct spi_controller *ctlr)
 		return ret;
 	}
 
-	kthread_flush_worker(&ctlr->kworker);
-	kthread_stop(ctlr->kworker_task);
+	kthread_destroy_worker(ctlr->kworker);
 
 	return 0;
 }
@@ -1822,7 +1820,7 @@ static int __spi_queued_transfer(struct spi_device *spi,
 		list_add_tail(&msg->queue, &ctlr->queue);
 	if (!ctlr->busy && need_pump)
-		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
 
 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 	return 0;

-- cgit
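For reference, the helper used above bundles what spi_init_queue() previously open-coded: worker initialisation plus kthread_run(kthread_worker_fn, ...), with kthread_destroy_worker() as the matching teardown. A minimal sketch of the two shapes outside the SPI code (the foo names are invented, error handling trimmed):

#include <linux/err.h>
#include <linux/kthread.h>

/* Old shape: embed the worker and spawn its thread by hand. */
static struct kthread_worker foo_worker;
static struct task_struct *foo_task;

static int foo_setup_old(void)
{
	kthread_init_worker(&foo_worker);
	foo_task = kthread_run(kthread_worker_fn, &foo_worker, "foo");
	return PTR_ERR_OR_ZERO(foo_task);
}

/* New shape: one call allocates the worker and starts its thread. */
static struct kthread_worker *foo_kworker;

static int foo_setup_new(void)
{
	foo_kworker = kthread_create_worker(0, "foo");
	return PTR_ERR_OR_ZERO(foo_kworker);
}

/*
 * Teardown then pairs as kthread_destroy_worker(foo_kworker) instead of
 * kthread_flush_worker() followed by kthread_stop().
 */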
From e126859729ed4a5143e5690186b8bec1c1157113 Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Wed, 15 Jul 2020 17:36:10 +0100
Subject: spi: Only defer to thread for cleanup when needed

Currently we always defer idling of controllers to the SPI thread.
The goals are to ensure that teardown which is not suitable for atomic
context runs in an appropriate context, and to batch up more expensive
teardown operations when the system is under higher load, allowing more
work to be started before the SPI thread is scheduled.  However, when
the controller does not require any substantial work to idle there is
no need for this; we can save the context switch and immediately mark
the controller as idle.  This is particularly useful for systems with
frequent but not constant activity.

Signed-off-by: Mark Brown
Link: https://lore.kernel.org/r/20200715163610.9475-1-broonie@kernel.org
Signed-off-by: Mark Brown
---
 drivers/spi/spi.c | 26 +++++++++++++++++++-------
 1 file changed, 19 insertions(+), 7 deletions(-)

(limited to 'drivers/spi/spi.c')

diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 1d7bba434225..0b260484b4f5 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1336,6 +1336,14 @@ void spi_finalize_current_transfer(struct spi_controller *ctlr)
 }
 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
 
+static void spi_idle_runtime_pm(struct spi_controller *ctlr)
+{
+	if (ctlr->auto_runtime_pm) {
+		pm_runtime_mark_last_busy(ctlr->dev.parent);
+		pm_runtime_put_autosuspend(ctlr->dev.parent);
+	}
+}
+
 /**
  * __spi_pump_messages - function which processes spi message queue
  * @ctlr: controller to process queue for
@@ -1380,10 +1388,17 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 			return;
 		}
 
-		/* Only do teardown in the thread */
+		/* Defer any non-atomic teardown to the thread */
 		if (!in_kthread) {
-			kthread_queue_work(ctlr->kworker,
-					   &ctlr->pump_messages);
+			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
+			    !ctlr->unprepare_transfer_hardware) {
+				spi_idle_runtime_pm(ctlr);
+				ctlr->busy = false;
+				trace_spi_controller_idle(ctlr);
+			} else {
+				kthread_queue_work(ctlr->kworker,
+						   &ctlr->pump_messages);
+			}
 			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 			return;
 		}
@@ -1400,10 +1415,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 	    ctlr->unprepare_transfer_hardware(ctlr))
 		dev_err(&ctlr->dev,
 			"failed to unprepare transfer hardware\n");
-	if (ctlr->auto_runtime_pm) {
-		pm_runtime_mark_last_busy(ctlr->dev.parent);
-		pm_runtime_put_autosuspend(ctlr->dev.parent);
-	}
+	spi_idle_runtime_pm(ctlr);
 	trace_spi_controller_idle(ctlr);
 
 	spin_lock_irqsave(&ctlr->queue_lock, flags);

-- cgit
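A closing illustration of how a controller driver pairs with the inline idle path above: spi_idle_runtime_pm() drops the runtime-PM reference as soon as the queue goes empty, so a driver that wants the hardware to stay powered briefly between bursts would combine auto_runtime_pm with runtime-PM autosuspend. Hypothetical probe fragment; the names and the 250 ms delay are invented.

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

static void foo_spi_setup_pm(struct platform_device *pdev,
			     struct spi_controller *ctlr)
{
	/* Let the SPI core take/drop runtime-PM references around transfers */
	ctlr->auto_runtime_pm = true;

	/* Keep the block powered for a short while after going idle */
	pm_runtime_set_autosuspend_delay(&pdev->dev, 250);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
}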