Diffstat (limited to 'drivers/spi/spi.c')
-rw-r--r--	drivers/spi/spi.c	870
1 file changed, 496 insertions(+), 374 deletions(-)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index f2170f4b5077..ff07c87dbadc 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -312,7 +312,7 @@ static const struct attribute_group *spi_master_groups[] = {
 
 static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
 					      struct spi_transfer *xfer,
-					      struct spi_controller *ctlr)
+					      struct spi_message *msg)
 {
 	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
 	struct spi_statistics *stats;
@@ -328,11 +328,9 @@ static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pc
 	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);
 	u64_stats_add(&stats->bytes, xfer->len);
 
-	if ((xfer->tx_buf) &&
-	    (xfer->tx_buf != ctlr->dummy_tx))
+	if (spi_valid_txbuf(msg, xfer))
 		u64_stats_add(&stats->bytes_tx, xfer->len);
-	if ((xfer->rx_buf) &&
-	    (xfer->rx_buf != ctlr->dummy_rx))
+	if (spi_valid_rxbuf(msg, xfer))
 		u64_stats_add(&stats->bytes_rx, xfer->len);
 
 	u64_stats_update_end(&stats->syncp);
@@ -373,7 +371,7 @@ const void *spi_get_device_match_data(const struct spi_device *sdev)
 }
 EXPORT_SYMBOL_GPL(spi_get_device_match_data);
 
-static int spi_match_device(struct device *dev, struct device_driver *drv)
+static int spi_match_device(struct device *dev, const struct device_driver *drv)
 {
 	const struct spi_device *spi = to_spi_device(dev);
 	const struct spi_driver *sdrv = to_spi_driver(drv);
@@ -412,19 +410,21 @@ static int spi_probe(struct device *dev)
 {
 	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
 	struct spi_device *spi = to_spi_device(dev);
+	struct fwnode_handle *fwnode = dev_fwnode(dev);
 	int ret;
 
 	ret = of_clk_set_defaults(dev->of_node, false);
 	if (ret)
 		return ret;
 
-	if (dev->of_node) {
+	if (is_of_node(fwnode))
 		spi->irq = of_irq_get(dev->of_node, 0);
-		if (spi->irq == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-		if (spi->irq < 0)
-			spi->irq = 0;
-	}
+	else if (is_acpi_device_node(fwnode) && spi->irq < 0)
+		spi->irq = acpi_dev_gpio_irq_get(to_acpi_device_node(fwnode), 0);
+	if (spi->irq == -EPROBE_DEFER)
+		return dev_err_probe(dev, spi->irq, "Failed to get irq\n");
+	if (spi->irq < 0)
+		spi->irq = 0;
 
 	ret = dev_pm_domain_attach(dev, true);
 	if (ret)
@@ -459,7 +459,7 @@ static void spi_shutdown(struct device *dev)
 	}
 }
 
-struct bus_type spi_bus_type = {
+const struct bus_type spi_bus_type = {
 	.name		= "spi",
 	.dev_groups	= spi_dev_groups,
 	.match		= spi_match_device,
@@ -584,7 +584,7 @@ struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
 		return NULL;
 	}
 
-	spi->master = spi->controller = ctlr;
+	spi->controller = ctlr;
 	spi->dev.parent = &ctlr->dev;
 	spi->dev.bus = &spi_bus_type;
 	spi->dev.release = spidev_release;
@@ -597,10 +597,16 @@ EXPORT_SYMBOL_GPL(spi_alloc_device);
 
 static void spi_dev_set_name(struct spi_device *spi)
 {
-	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
+	struct device *dev = &spi->dev;
+	struct fwnode_handle *fwnode = dev_fwnode(dev);
+
+	if (is_acpi_device_node(fwnode)) {
+		dev_set_name(dev, "spi-%s", acpi_dev_name(to_acpi_device_node(fwnode)));
+		return;
+	}
 
-	if (adev) {
-		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
+	if (is_software_node(fwnode)) {
+		dev_set_name(dev, "spi-%pfwP", fwnode);
 		return;
 	}
 
@@ -608,23 +614,51 @@ static void spi_dev_set_name(struct spi_device *spi)
 		     spi_get_chipselect(spi, 0));
 }
 
+/*
+ * Zero(0) is a valid physical CS value and can be located at any
+ * logical CS in the spi->chip_select[]. If all the physical CS
+ * are initialized to 0 then It would be difficult to differentiate
+ * between a valid physical CS 0 & an unused logical CS whose physical
+ * CS can be 0. As a solution to this issue initialize all the CS to -1.
+ * Now all the unused logical CS will have -1 physical CS value & can be
+ * ignored while performing physical CS validity checks.
+ */
+#define SPI_INVALID_CS		((s8)-1)
+
+static inline bool is_valid_cs(s8 chip_select)
+{
+	return chip_select != SPI_INVALID_CS;
+}
+
+static inline int spi_dev_check_cs(struct device *dev,
+				   struct spi_device *spi, u8 idx,
+				   struct spi_device *new_spi, u8 new_idx)
+{
+	u8 cs, cs_new;
+	u8 idx_new;
+
+	cs = spi_get_chipselect(spi, idx);
+	for (idx_new = new_idx; idx_new < SPI_CS_CNT_MAX; idx_new++) {
+		cs_new = spi_get_chipselect(new_spi, idx_new);
+		if (is_valid_cs(cs) && is_valid_cs(cs_new) && cs == cs_new) {
+			dev_err(dev, "chipselect %u already in use\n", cs_new);
+			return -EBUSY;
+		}
+	}
+	return 0;
+}
+
 static int spi_dev_check(struct device *dev, void *data)
 {
 	struct spi_device *spi = to_spi_device(dev);
 	struct spi_device *new_spi = data;
-	int idx, nw_idx;
-	u8 cs, cs_nw;
+	int status, idx;
 
 	if (spi->controller == new_spi->controller) {
 		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
-			cs = spi_get_chipselect(spi, idx);
-			for (nw_idx = 0; nw_idx < SPI_CS_CNT_MAX; nw_idx++) {
-				cs_nw = spi_get_chipselect(new_spi, nw_idx);
-				if (cs != 0xFF && cs_nw != 0xFF && cs == cs_nw) {
-					dev_err(dev, "chipselect %d already in use\n", cs_nw);
-					return -EBUSY;
-				}
-			}
+			status = spi_dev_check_cs(dev, spi, idx, new_spi, 0);
+			if (status)
+				return status;
 		}
 	}
 
 	return 0;
@@ -640,13 +674,13 @@ static int __spi_add_device(struct spi_device *spi)
 {
 	struct spi_controller *ctlr = spi->controller;
 	struct device *dev = ctlr->dev.parent;
-	int status, idx, nw_idx;
-	u8 cs, nw_cs;
+	int status, idx;
+	u8 cs;
 
 	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
 		/* Chipselects are numbered 0..max; validate. */
 		cs = spi_get_chipselect(spi, idx);
-		if (cs != 0xFF && cs >= ctlr->num_chipselect) {
+		if (is_valid_cs(cs) && cs >= ctlr->num_chipselect) {
 			dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx),
 				ctlr->num_chipselect);
 			return -EINVAL;
@@ -657,14 +691,11 @@ static int __spi_add_device(struct spi_device *spi)
 	 * Make sure that multiple logical CS doesn't map to the same physical CS.
 	 * For example, spi->chip_select[0] != spi->chip_select[1] and so on.
 	 */
-	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
-		cs = spi_get_chipselect(spi, idx);
-		for (nw_idx = idx + 1; nw_idx < SPI_CS_CNT_MAX; nw_idx++) {
-			nw_cs = spi_get_chipselect(spi, nw_idx);
-			if (cs != 0xFF && nw_cs != 0xFF && cs == nw_cs) {
-				dev_err(dev, "chipselect %d already in use\n", nw_cs);
-				return -EBUSY;
-			}
+	if (!spi_controller_is_target(ctlr)) {
+		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+			status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
+			if (status)
+				return status;
 		}
 	}
 
@@ -691,7 +722,7 @@ static int __spi_add_device(struct spi_device *spi)
 
 		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
 			cs = spi_get_chipselect(spi, idx);
-			if (cs != 0xFF)
+			if (is_valid_cs(cs))
 				spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
 		}
 	}
@@ -745,6 +776,14 @@ int spi_add_device(struct spi_device *spi)
 }
 EXPORT_SYMBOL_GPL(spi_add_device);
 
+static void spi_set_all_cs_unused(struct spi_device *spi)
+{
+	u8 idx;
+
+	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
+		spi_set_chipselect(spi, idx, SPI_INVALID_CS);
+}
+
 /**
  * spi_new_device - instantiate one new SPI device
  * @ctlr: Controller to which device is connected
@@ -764,7 +803,6 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr,
 {
 	struct spi_device	*proxy;
 	int			status;
-	u8			idx;
 
 	/*
 	 * NOTE:  caller did any chip->bus_num checks necessary.
@@ -780,19 +818,10 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr,
 
 	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
 
-	/*
-	 * Zero(0) is a valid physical CS value and can be located at any
-	 * logical CS in the spi->chip_select[]. If all the physical CS
-	 * are initialized to 0 then It would be difficult to differentiate
-	 * between a valid physical CS 0 & an unused logical CS whose physical
-	 * CS can be 0. As a solution to this issue initialize all the CS to 0xFF.
-	 * Now all the unused logical CS will have 0xFF physical CS value & can be
-	 * ignore while performing physical CS validity checks.
-	 */
-	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
-		spi_set_chipselect(proxy, idx, 0xFF);
-
+	/* Use provided chip-select for proxy device */
+	spi_set_all_cs_unused(proxy);
 	spi_set_chipselect(proxy, 0, chip->chip_select);
+
 	proxy->max_speed_hz = chip->max_speed_hz;
 	proxy->mode = chip->mode;
 	proxy->irq = chip->irq;
@@ -801,14 +830,10 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr,
 	proxy->controller_data = chip->controller_data;
 	proxy->controller_state = NULL;
 	/*
-	 * spi->chip_select[i] gives the corresponding physical CS for logical CS i
-	 * logical CS number is represented by setting the ith bit in spi->cs_index_mask
-	 * So, for example, if spi->cs_index_mask = 0x01 then logical CS number is 0 and
-	 * spi->chip_select[0] will give the physical CS.
-	 * By default spi->chip_select[0] will hold the physical CS number so, set
-	 * spi->cs_index_mask as 0x01.
+	 * By default spi->chip_select[0] will hold the physical CS number,
+	 * so set bit 0 in spi->cs_index_mask.
 	 */
-	proxy->cs_index_mask = 0x01;
+	proxy->cs_index_mask = BIT(0);
 
 	if (chip->swnode) {
 		status = device_add_software_node(&proxy->dev, chip->swnode);
@@ -841,15 +866,18 @@ EXPORT_SYMBOL_GPL(spi_new_device);
  */
 void spi_unregister_device(struct spi_device *spi)
 {
+	struct fwnode_handle *fwnode;
+
 	if (!spi)
 		return;
 
-	if (spi->dev.of_node) {
-		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
-		of_node_put(spi->dev.of_node);
+	fwnode = dev_fwnode(&spi->dev);
+	if (is_of_node(fwnode)) {
+		of_node_clear_flag(to_of_node(fwnode), OF_POPULATED);
+		of_node_put(to_of_node(fwnode));
+	} else if (is_acpi_device_node(fwnode)) {
+		acpi_device_clear_enumerated(to_acpi_device_node(fwnode));
 	}
-	if (ACPI_COMPANION(&spi->dev))
-		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
 	device_remove_software_node(&spi->dev);
 	device_del(&spi->dev);
 	spi_cleanup(spi);
@@ -961,9 +989,6 @@ static void spi_res_free(void *res)
 {
 	struct spi_res *sres = container_of(res, struct spi_res, data);
 
-	if (!res)
-		return;
-
 	WARN_ON(!list_empty(&sres->entry));
 	kfree(sres);
 }
@@ -1001,20 +1026,45 @@ static void spi_res_release(struct spi_controller *ctlr, struct spi_message *mes
 }
 
 /*-------------------------------------------------------------------------*/
+#define spi_for_each_valid_cs(spi, idx)				\
+	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)		\
+		if (!(spi->cs_index_mask & BIT(idx))) {} else
+
 static inline bool spi_is_last_cs(struct spi_device *spi)
 {
 	u8 idx;
 	bool last = false;
 
-	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
-		if ((spi->cs_index_mask >> idx) & 0x01) {
-			if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
-				last = true;
-		}
+	spi_for_each_valid_cs(spi, idx) {
+		if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
+			last = true;
 	}
 	return last;
 }
 
+static void spi_toggle_csgpiod(struct spi_device *spi, u8 idx, bool enable, bool activate)
+{
+	/*
+	 * Historically ACPI has no means of the GPIO polarity and
+	 * thus the SPISerialBus() resource defines it on the per-chip
+	 * basis. In order to avoid a chain of negations, the GPIO
+	 * polarity is considered being Active High. Even for the cases
+	 * when _DSD() is involved (in the updated versions of ACPI)
+	 * the GPIO CS polarity must be defined Active High to avoid
+	 * ambiguity. That's why we use enable, that takes SPI_CS_HIGH
+	 * into account.
+	 */
+	if (is_acpi_device_node(dev_fwnode(&spi->dev)))
+		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), !enable);
+	else
+		/* Polarity handled by GPIO library */
+		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), activate);
+
+	if (activate)
+		spi_delay_exec(&spi->cs_setup, NULL);
+	else
+		spi_delay_exec(&spi->cs_inactive, NULL);
+}
+
 static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
 {
@@ -1036,59 +1086,40 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
 	spi->controller->last_cs_index_mask = spi->cs_index_mask;
 	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
-		spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, 0) : -1;
+		spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, 0) : SPI_INVALID_CS;
 	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
 
 	if (spi->mode & SPI_CS_HIGH)
 		enable = !enable;
 
-	if (spi_is_csgpiod(spi)) {
-		if (!spi->controller->set_cs_timing && !activate)
-			spi_delay_exec(&spi->cs_hold, NULL);
+	/*
+	 * Handle chip select delays for GPIO based CS or controllers without
+	 * programmable chip select timing.
+	 */
+	if ((spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) && !activate)
+		spi_delay_exec(&spi->cs_hold, NULL);
 
+	if (spi_is_csgpiod(spi)) {
 		if (!(spi->mode & SPI_NO_CS)) {
-			/*
-			 * Historically ACPI has no means of the GPIO polarity and
-			 * thus the SPISerialBus() resource defines it on the per-chip
-			 * basis. In order to avoid a chain of negations, the GPIO
-			 * polarity is considered being Active High. Even for the cases
-			 * when _DSD() is involved (in the updated versions of ACPI)
-			 * the GPIO CS polarity must be defined Active High to avoid
-			 * ambiguity. That's why we use enable, that takes SPI_CS_HIGH
-			 * into account.
-			 */
-			for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
-				if (((spi->cs_index_mask >> idx) & 0x01) &&
-				    spi_get_csgpiod(spi, idx)) {
-					if (has_acpi_companion(&spi->dev))
-						gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx),
-									 !enable);
-					else
-						/* Polarity handled by GPIO library */
-						gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx),
-									 activate);
-
-					if (activate)
-						spi_delay_exec(&spi->cs_setup, NULL);
-					else
-						spi_delay_exec(&spi->cs_inactive, NULL);
-				}
+			spi_for_each_valid_cs(spi, idx) {
+				if (spi_get_csgpiod(spi, idx))
+					spi_toggle_csgpiod(spi, idx, enable, activate);
 			}
 		}
 		/* Some SPI masters need both GPIO CS & slave_select */
 		if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
 		    spi->controller->set_cs)
 			spi->controller->set_cs(spi, !enable);
-
-		if (!spi->controller->set_cs_timing) {
-			if (activate)
-				spi_delay_exec(&spi->cs_setup, NULL);
-			else
-				spi_delay_exec(&spi->cs_inactive, NULL);
-		}
 	} else if (spi->controller->set_cs) {
 		spi->controller->set_cs(spi, !enable);
 	}
+
+	if (spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) {
+		if (activate)
+			spi_delay_exec(&spi->cs_setup, NULL);
+		else
+			spi_delay_exec(&spi->cs_inactive, NULL);
+	}
 }
 
 #ifdef CONFIG_HAS_DMA
@@ -1181,12 +1212,10 @@ static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
 				enum dma_data_direction dir,
 				unsigned long attrs)
 {
-	if (sgt->orig_nents) {
-		dma_unmap_sgtable(dev, sgt, dir, attrs);
-		sg_free_table(sgt);
-		sgt->orig_nents = 0;
-		sgt->nents = 0;
-	}
+	dma_unmap_sgtable(dev, sgt, dir, attrs);
+	sg_free_table(sgt);
+	sgt->orig_nents = 0;
+	sgt->nents = 0;
 }
 
 void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
@@ -1218,6 +1247,7 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 	else
 		rx_dev = ctlr->dev.parent;
 
+	ret = -ENOMSG;
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 		/* The sync is done before each transfer. */
 		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
@@ -1232,6 +1262,8 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 						attrs);
 			if (ret != 0)
 				return ret;
+
+			xfer->tx_sg_mapped = true;
 		}
 
 		if (xfer->rx_buf != NULL) {
@@ -1245,12 +1277,16 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 				return ret;
 			}
+
+			xfer->rx_sg_mapped = true;
 		}
 	}
+	/* No transfer has been mapped, bail out with success */
+	if (ret)
+		return 0;
 
 	ctlr->cur_rx_dma_dev = rx_dev;
 	ctlr->cur_tx_dma_dev = tx_dev;
-	ctlr->cur_msg_mapped = true;
 
 	return 0;
 }
@@ -1261,24 +1297,21 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
 	struct device *tx_dev = ctlr->cur_tx_dma_dev;
 	struct spi_transfer *xfer;
 
-	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
-		return 0;
-
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 		/* The sync has already been done after each transfer. */
 		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
 
-		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
-			continue;
+		if (xfer->rx_sg_mapped)
+			spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
+					    DMA_FROM_DEVICE, attrs);
+		xfer->rx_sg_mapped = false;
 
-		spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
-				    DMA_FROM_DEVICE, attrs);
-		spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
-				    DMA_TO_DEVICE, attrs);
+		if (xfer->tx_sg_mapped)
+			spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
+					    DMA_TO_DEVICE, attrs);
+		xfer->tx_sg_mapped = false;
 	}
 
-	ctlr->cur_msg_mapped = false;
-
 	return 0;
 }
 
@@ -1288,12 +1321,9 @@ static void spi_dma_sync_for_device(struct spi_controller *ctlr,
 	struct device *rx_dev = ctlr->cur_rx_dma_dev;
 	struct device *tx_dev = ctlr->cur_tx_dma_dev;
 
-	if (!ctlr->cur_msg_mapped)
-		return;
-
-	if (xfer->tx_sg.orig_nents)
+	if (xfer->tx_sg_mapped)
 		dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
-	if (xfer->rx_sg.orig_nents)
+	if (xfer->rx_sg_mapped)
 		dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
 }
 
@@ -1303,12 +1333,9 @@ static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
 	struct device *rx_dev = ctlr->cur_rx_dma_dev;
 	struct device *tx_dev = ctlr->cur_tx_dma_dev;
 
-	if (!ctlr->cur_msg_mapped)
-		return;
-
-	if (xfer->rx_sg.orig_nents)
+	if (xfer->rx_sg_mapped)
 		dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
-	if (xfer->tx_sg.orig_nents)
+	if (xfer->tx_sg_mapped)
 		dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
 }
 #else /* !CONFIG_HAS_DMA */
@@ -1415,7 +1442,7 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
 	u32 speed_hz = xfer->speed_hz;
 	unsigned long long ms;
 
-	if (spi_controller_is_slave(ctlr)) {
+	if (spi_controller_is_target(ctlr)) {
 		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
 			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
 			return -EINTR;
@@ -1589,8 +1616,8 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 		trace_spi_transfer_start(msg, xfer);
 
-		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
-		spi_statistics_add_transfer_stats(stats, xfer, ctlr);
+		spi_statistics_add_transfer_stats(statm, xfer, msg);
+		spi_statistics_add_transfer_stats(stats, xfer, msg);
 
 		if (!ctlr->ptp_sts_supported) {
 			xfer->ptp_sts_word_pre = 0;
@@ -1606,8 +1633,8 @@ fallback_pio:
 			if (ret < 0) {
 				spi_dma_sync_for_cpu(ctlr, xfer);
 
-				if (ctlr->cur_msg_mapped &&
-				   (xfer->error & SPI_TRANS_FAIL_NO_START)) {
+				if ((xfer->tx_sg_mapped || xfer->rx_sg_mapped) &&
+				    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
 					__spi_unmap_msg(ctlr, msg);
 					ctlr->fallback = true;
 					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
@@ -1747,15 +1774,6 @@ static int __spi_pump_transfer_message(struct spi_controller *ctlr,
 
 	trace_spi_message_start(msg);
 
-	ret = spi_split_transfers_maxsize(ctlr, msg,
-					  spi_max_transfer_size(msg->spi),
-					  GFP_KERNEL | GFP_DMA);
-	if (ret) {
-		msg->status = ret;
-		spi_finalize_current_message(ctlr);
-		return ret;
-	}
-
 	if (ctlr->prepare_message) {
 		ret = ctlr->prepare_message(ctlr, msg);
 		if (ret) {
@@ -2037,7 +2055,7 @@ static int spi_init_queue(struct spi_controller *ctlr)
 	ctlr->busy = false;
 	ctlr->queue_empty = true;
 
-	ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
+	ctlr->kworker = kthread_run_worker(0, dev_name(&ctlr->dev));
 	if (IS_ERR(ctlr->kworker)) {
 		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
 		return PTR_ERR(ctlr->kworker);
@@ -2083,6 +2101,44 @@ struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
 }
 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
 
+/*
+ * __spi_unoptimize_message - shared implementation of spi_unoptimize_message()
+ *                            and spi_maybe_unoptimize_message()
+ * @msg: the message to unoptimize
+ *
+ * Peripheral drivers should use spi_unoptimize_message() and callers inside
+ * core should use spi_maybe_unoptimize_message() rather than calling this
+ * function directly.
+ *
+ * It is not valid to call this on a message that is not currently optimized.
+ */
+static void __spi_unoptimize_message(struct spi_message *msg)
+{
+	struct spi_controller *ctlr = msg->spi->controller;
+
+	if (ctlr->unoptimize_message)
+		ctlr->unoptimize_message(msg);
+
+	spi_res_release(ctlr, msg);
+
+	msg->optimized = false;
+	msg->opt_state = NULL;
+}
+
+/*
+ * spi_maybe_unoptimize_message - unoptimize msg not managed by a peripheral
+ * @msg: the message to unoptimize
+ *
+ * This function is used to unoptimize a message if and only if it was
+ * optimized by the core (via spi_maybe_optimize_message()).
+ */
+static void spi_maybe_unoptimize_message(struct spi_message *msg)
+{
+	if (!msg->pre_optimized && msg->optimized &&
+	    !msg->spi->controller->defer_optimize_message)
+		__spi_unoptimize_message(msg);
+}
+
 /**
  * spi_finalize_current_message() - the current message is complete
  * @ctlr: the controller to return the message to
@@ -2111,15 +2167,6 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
 
 	spi_unmap_msg(ctlr, mesg);
 
-	/*
-	 * In the prepare_messages callback the SPI bus has the opportunity
-	 * to split a transfer to smaller chunks.
-	 *
-	 * Release the split transfers here since spi_map_msg() is done on
-	 * the split transfers.
-	 */
-	spi_res_release(ctlr, mesg);
-
 	if (mesg->prepared && ctlr->unprepare_message) {
 		ret = ctlr->unprepare_message(ctlr, mesg);
 		if (ret) {
@@ -2130,6 +2177,8 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
 
 	mesg->prepared = false;
 
+	spi_maybe_unoptimize_message(mesg);
+
 	WRITE_ONCE(ctlr->cur_msg_incomplete, false);
 	smp_mb(); /* See __spi_pump_transfer_message()... */
 	if (READ_ONCE(ctlr->cur_msg_need_completion))
@@ -2165,11 +2214,8 @@ static int spi_start_queue(struct spi_controller *ctlr)
 
 static int spi_stop_queue(struct spi_controller *ctlr)
 {
+	unsigned int limit = 500;
 	unsigned long flags;
-	unsigned limit = 500;
-	int ret = 0;
-
-	spin_lock_irqsave(&ctlr->queue_lock, flags);
 
 	/*
 	 * This is a bit lame, but is optimized for the common execution path.
@@ -2177,20 +2223,18 @@ static int spi_stop_queue(struct spi_controller *ctlr)
 	 * execution path (pump_messages) would be required to call wake_up or
 	 * friends on every SPI message. Do this instead.
 	 */
-	while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
+	do {
+		spin_lock_irqsave(&ctlr->queue_lock, flags);
+		if (list_empty(&ctlr->queue) && !ctlr->busy) {
+			ctlr->running = false;
+			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+			return 0;
+		}
 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 		usleep_range(10000, 11000);
-		spin_lock_irqsave(&ctlr->queue_lock, flags);
-	}
-
-	if (!list_empty(&ctlr->queue) || ctlr->busy)
-		ret = -EBUSY;
-	else
-		ctlr->running = false;
-
-	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+	} while (--limit);
 
-	return ret;
+	return -EBUSY;
 }
 
 static int spi_destroy_queue(struct spi_controller *ctlr)
@@ -2383,7 +2427,7 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
 		}
 	}
 
-	if (spi_controller_is_slave(ctlr)) {
+	if (spi_controller_is_target(ctlr)) {
 		if (!of_node_name_eq(nc, "slave")) {
 			dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
 				nc);
@@ -2397,17 +2441,7 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
 		return -EINVAL;
 	}
 
-	/*
-	 * Zero(0) is a valid physical CS value and can be located at any
-	 * logical CS in the spi->chip_select[]. If all the physical CS
-	 * are initialized to 0 then It would be difficult to differentiate
-	 * between a valid physical CS 0 & an unused logical CS whose physical
-	 * CS can be 0. As a solution to this issue initialize all the CS to 0xFF.
-	 * Now all the unused logical CS will have 0xFF physical CS value & can be
-	 * ignore while performing physical CS validity checks.
-	 */
-	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
-		spi_set_chipselect(spi, idx, 0xFF);
+	spi_set_all_cs_unused(spi);
 
 	/* Device address */
 	rc = of_property_read_variable_u32_array(nc, "reg", &cs[0], 1,
@@ -2422,7 +2456,7 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
 			nc, rc);
 		return rc;
 	}
-	if ((of_property_read_bool(nc, "parallel-memories")) &&
+	if ((of_property_present(nc, "parallel-memories")) &&
 	    (!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) {
 		dev_err(&ctlr->dev, "SPI controller doesn't support multi CS\n");
 		return -EINVAL;
@@ -2431,14 +2465,10 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
 		spi_set_chipselect(spi, idx, cs[idx]);
 
 	/*
-	 * spi->chip_select[i] gives the corresponding physical CS for logical CS i
-	 * logical CS number is represented by setting the ith bit in spi->cs_index_mask
-	 * So, for example, if spi->cs_index_mask = 0x01 then logical CS number is 0 and
-	 * spi->chip_select[0] will give the physical CS.
-	 * By default spi->chip_select[0] will hold the physical CS number so, set
-	 * spi->cs_index_mask as 0x01.
+	 * By default spi->chip_select[0] will hold the physical CS number,
+	 * so set bit 0 in spi->cs_index_mask.
	 */
-	spi->cs_index_mask = 0x01;
+	spi->cs_index_mask = BIT(0);
 
 	/* Device speed */
 	if (!of_property_read_u32(nc, "spi-max-frequency", &value))
@@ -2543,8 +2573,7 @@ struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
 {
 	struct spi_controller *ctlr = spi->controller;
 	struct spi_device *ancillary;
-	int rc = 0;
-	u8 idx;
+	int rc;
 
 	/* Alloc an spi_device */
 	ancillary = spi_alloc_device(ctlr);
@@ -2555,33 +2584,18 @@ struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
 
 	strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
 
-	/*
-	 * Zero(0) is a valid physical CS value and can be located at any
-	 * logical CS in the spi->chip_select[]. If all the physical CS
-	 * are initialized to 0 then It would be difficult to differentiate
-	 * between a valid physical CS 0 & an unused logical CS whose physical
-	 * CS can be 0. As a solution to this issue initialize all the CS to 0xFF.
-	 * Now all the unused logical CS will have 0xFF physical CS value & can be
-	 * ignore while performing physical CS validity checks.
-	 */
-	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
-		spi_set_chipselect(ancillary, idx, 0xFF);
-
 	/* Use provided chip-select for ancillary device */
+	spi_set_all_cs_unused(ancillary);
 	spi_set_chipselect(ancillary, 0, chip_select);
 
 	/* Take over SPI mode/speed from SPI main device */
 	ancillary->max_speed_hz = spi->max_speed_hz;
 	ancillary->mode = spi->mode;
 	/*
-	 * spi->chip_select[i] gives the corresponding physical CS for logical CS i
-	 * logical CS number is represented by setting the ith bit in spi->cs_index_mask
-	 * So, for example, if spi->cs_index_mask = 0x01 then logical CS number is 0 and
-	 * spi->chip_select[0] will give the physical CS.
-	 * By default spi->chip_select[0] will hold the physical CS number so, set
-	 * spi->cs_index_mask as 0x01.
+	 * By default spi->chip_select[0] will hold the physical CS number,
+	 * so set bit 0 in spi->cs_index_mask.
	 */
-	ancillary->cs_index_mask = 0x01;
+	ancillary->cs_index_mask = BIT(0);
 
 	WARN_ON(!mutex_is_locked(&ctlr->add_lock));
@@ -2705,7 +2719,7 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
 			return -ENODEV;
 
 		if (ctlr) {
-			if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
+			if (!device_match_acpi_handle(ctlr->dev.parent, parent_handle))
 				return -ENODEV;
 		} else {
 			struct acpi_device *adev;
@@ -2784,7 +2798,6 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
 	struct acpi_spi_lookup lookup = {};
 	struct spi_device *spi;
 	int ret;
-	u8 idx;
 
 	if (!ctlr && index == -1)
 		return ERR_PTR(-EINVAL);
@@ -2805,7 +2818,7 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
 
 	if (!lookup.max_speed_hz &&
 	    ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
-	    ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) {
+	    device_match_acpi_handle(lookup.ctlr->dev.parent, parent_handle)) {
 		/* Apple does not use _CRS but nested devices for SPI slaves */
 		acpi_spi_parse_apple_properties(adev, &lookup);
 	}
@@ -2820,33 +2833,19 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
 		return ERR_PTR(-ENOMEM);
 	}
 
-	/*
-	 * Zero(0) is a valid physical CS value and can be located at any
-	 * logical CS in the spi->chip_select[]. If all the physical CS
-	 * are initialized to 0 then It would be difficult to differentiate
-	 * between a valid physical CS 0 & an unused logical CS whose physical
-	 * CS can be 0. As a solution to this issue initialize all the CS to 0xFF.
-	 * Now all the unused logical CS will have 0xFF physical CS value & can be
-	 * ignore while performing physical CS validity checks.
-	 */
-	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
-		spi_set_chipselect(spi, idx, 0xFF);
+	spi_set_all_cs_unused(spi);
+	spi_set_chipselect(spi, 0, lookup.chip_select);
 
 	ACPI_COMPANION_SET(&spi->dev, adev);
 	spi->max_speed_hz	= lookup.max_speed_hz;
 	spi->mode		|= lookup.mode;
 	spi->irq		= lookup.irq;
 	spi->bits_per_word	= lookup.bits_per_word;
-	spi_set_chipselect(spi, 0, lookup.chip_select);
 	/*
-	 * spi->chip_select[i] gives the corresponding physical CS for logical CS i
-	 * logical CS number is represented by setting the ith bit in spi->cs_index_mask
-	 * So, for example, if spi->cs_index_mask = 0x01 then logical CS number is 0 and
-	 * spi->chip_select[0] will give the physical CS.
-	 * By default spi->chip_select[0] will hold the physical CS number so, set
-	 * spi->cs_index_mask as 0x01.
+	 * By default spi->chip_select[0] will hold the physical CS number,
+	 * so set bit 0 in spi->cs_index_mask.
	 */
-	spi->cs_index_mask	= 0x01;
+	spi->cs_index_mask	= BIT(0);
 
 	return spi;
 }
@@ -2872,9 +2871,6 @@ static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
 	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
 			  sizeof(spi->modalias));
 
-	if (spi->irq < 0)
-		spi->irq = acpi_dev_gpio_irq_get(adev, 0);
-
 	acpi_device_set_enumerated(adev);
 
 	adev->power.flags.ignore_parent = true;
@@ -2929,7 +2925,7 @@ static void spi_controller_release(struct device *dev)
 	kfree(ctlr);
 }
 
-static struct class spi_master_class = {
+static const struct class spi_master_class = {
 	.name		= "spi_master",
 	.dev_release	= spi_controller_release,
 	.dev_groups	= spi_master_groups,
@@ -2937,21 +2933,10 @@ static struct class spi_master_class = {
 
 #ifdef CONFIG_SPI_SLAVE
 /**
- * spi_slave_abort - abort the ongoing transfer request on an SPI slave
+ * spi_target_abort - abort the ongoing transfer request on an SPI slave
  *		     controller
  * @spi: device used for the current transfer
  */
-int spi_slave_abort(struct spi_device *spi)
-{
-	struct spi_controller *ctlr = spi->controller;
-
-	if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
-		return ctlr->slave_abort(ctlr);
-
-	return -ENOTSUPP;
-}
-EXPORT_SYMBOL_GPL(spi_slave_abort);
-
 int spi_target_abort(struct spi_device *spi)
 {
 	struct spi_controller *ctlr = spi->controller;
@@ -2969,9 +2954,13 @@ static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
 	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
 						   dev);
 	struct device *child;
+	int ret;
 
 	child = device_find_any_child(&ctlr->dev);
-	return sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
+	ret = sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
+	put_device(child);
+
+	return ret;
 }
 
 static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
@@ -3030,7 +3019,7 @@ static const struct attribute_group *spi_slave_groups[] = {
 	NULL,
 };
 
-static struct class spi_slave_class = {
+static const struct class spi_slave_class = {
 	.name		= "spi_slave",
 	.dev_release	= spi_controller_release,
 	.dev_groups	= spi_slave_groups,
@@ -3252,9 +3241,9 @@ static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int e
 }
 
 /**
- * spi_register_controller - register SPI master or slave controller
- * @ctlr: initialized master, originally from spi_alloc_master() or
- *	spi_alloc_slave()
+ * spi_register_controller - register SPI host or target controller
+ * @ctlr: initialized controller, originally from spi_alloc_host() or
+ *	spi_alloc_target()
  * Context: can sleep
 *
 * SPI controllers connect to their drivers using some non-SPI bus,
@@ -3324,7 +3313,7 @@ int spi_register_controller(struct spi_controller *ctlr)
 	 */
 	dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
 
-	if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) {
+	if (!spi_controller_is_target(ctlr) && ctlr->use_gpio_descriptors) {
 		status = spi_get_gpio_descs(ctlr);
 		if (status)
 			goto free_bus_id;
@@ -3344,15 +3333,15 @@ int spi_register_controller(struct spi_controller *ctlr)
 		goto free_bus_id;
 	}
 
-	/* Setting last_cs to -1 means no chip selected */
+	/* Setting last_cs to SPI_INVALID_CS means no chip selected */
 	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
-		ctlr->last_cs[idx] = -1;
+		ctlr->last_cs[idx] = SPI_INVALID_CS;
 
 	status = device_add(&ctlr->dev);
 	if (status < 0)
 		goto free_bus_id;
 	dev_dbg(dev, "registered %s %s\n",
-		spi_controller_is_slave(ctlr) ? "slave" : "master",
+		spi_controller_is_target(ctlr) ? "target" : "host",
 		dev_name(&ctlr->dev));
 
 	/*
@@ -3404,11 +3393,11 @@ static void devm_spi_unregister(struct device *dev, void *res)
 }
 
 /**
- * devm_spi_register_controller - register managed SPI master or slave
+ * devm_spi_register_controller - register managed SPI host or target
  *	controller
  * @dev: device managing SPI controller
- * @ctlr: initialized controller, originally from spi_alloc_master() or
- *	spi_alloc_slave()
+ * @ctlr: initialized controller, originally from spi_alloc_host() or
+ *	spi_alloc_target()
  * Context: can sleep
 *
 * Register a SPI device as with spi_register_controller() which will
@@ -3492,7 +3481,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
 
 	/*
 	 * Release the last reference on the controller if its driver
-	 * has not yet been converted to devm_spi_alloc_master/slave().
+	 * has not yet been converted to devm_spi_alloc_host/target().
	 */
 	if (!ctlr->devm_allocated)
 		put_device(&ctlr->dev);
@@ -3687,8 +3676,7 @@ static struct spi_replaced_transfers *spi_replace_transfers(
 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
 					struct spi_message *msg,
 					struct spi_transfer **xferp,
-					size_t maxsize,
-					gfp_t gfp)
+					size_t maxsize)
 {
 	struct spi_transfer *xfer = *xferp, *xfers;
 	struct spi_replaced_transfers *srt;
@@ -3699,7 +3687,7 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
 	count = DIV_ROUND_UP(xfer->len, maxsize);
 
 	/* Create replacement */
-	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
+	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, GFP_KERNEL);
 	if (IS_ERR(srt))
 		return PTR_ERR(srt);
 	xfers = srt->inserted_transfers;
@@ -3710,9 +3698,6 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
 	 * to the same values as *xferp, so tx_buf, rx_buf and len
 	 * are all identical (as well as most others)
 	 * so we just have to fix up len and the pointers.
-	 *
-	 * This also includes support for the depreciated
-	 * spi_message.is_dma_mapped interface.
	 */
 
 	/*
@@ -3726,12 +3711,8 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
 		/* Update rx_buf, tx_buf and DMA */
 		if (xfers[i].rx_buf)
 			xfers[i].rx_buf += offset;
-		if (xfers[i].rx_dma)
-			xfers[i].rx_dma += offset;
 		if (xfers[i].tx_buf)
 			xfers[i].tx_buf += offset;
-		if (xfers[i].tx_dma)
-			xfers[i].tx_dma += offset;
 
 		/* Update length */
 		xfers[i].len = min(maxsize, xfers[i].len - offset);
@@ -3759,14 +3740,16 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
 * @ctlr: the @spi_controller for this transfer
 * @msg: the @spi_message to transform
 * @maxsize: the maximum when to apply this
- * @gfp: GFP allocation flags
+ *
+ * This function allocates resources that are automatically freed during the
+ * spi message unoptimize phase so this function should only be called from
+ * optimize_message callbacks.
 *
 * Return: status of transformation
 */
 int spi_split_transfers_maxsize(struct spi_controller *ctlr,
 				struct spi_message *msg,
-				size_t maxsize,
-				gfp_t gfp)
+				size_t maxsize)
 {
 	struct spi_transfer *xfer;
 	int ret;
@@ -3781,7 +3764,7 @@ int spi_split_transfers_maxsize(struct spi_controller *ctlr,
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 		if (xfer->len > maxsize) {
 			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
-							   maxsize, gfp);
+							   maxsize);
 			if (ret)
 				return ret;
 		}
@@ -3799,14 +3782,16 @@ EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
 * @ctlr: the @spi_controller for this transfer
 * @msg: the @spi_message to transform
 * @maxwords: the number of words to limit each transfer to
- * @gfp: GFP allocation flags
+ *
+ * This function allocates resources that are automatically freed during the
+ * spi message unoptimize phase so this function should only be called from
+ * optimize_message callbacks.
 *
 * Return: status of transformation
 */
 int spi_split_transfers_maxwords(struct spi_controller *ctlr,
 				 struct spi_message *msg,
-				 size_t maxwords,
-				 gfp_t gfp)
+				 size_t maxwords)
 {
 	struct spi_transfer *xfer;
@@ -3824,7 +3809,7 @@ int spi_split_transfers_maxwords(struct spi_controller *ctlr,
 			maxsize = maxwords * roundup_pow_of_two(BITS_TO_BYTES(xfer->bits_per_word));
 		if (xfer->len > maxsize) {
 			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
-							   maxsize, gfp);
+							   maxsize);
 			if (ret)
 				return ret;
 		}
@@ -3909,7 +3894,7 @@ static int spi_set_cs_timing(struct spi_device *spi)
 int spi_setup(struct spi_device *spi)
 {
 	unsigned	bad_bits, ugly_bits;
-	int		status = 0;
+	int		status;
 
 	/*
 	 * Check mode to prevent that any two of DUAL, QUAD and NO_MOSI/MISO
@@ -3928,6 +3913,12 @@ int spi_setup(struct spi_device *spi)
 	    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
 	     SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
 		return -EINVAL;
+	/* Check against conflicting MOSI idle configuration */
+	if ((spi->mode & SPI_MOSI_IDLE_LOW) && (spi->mode & SPI_MOSI_IDLE_HIGH)) {
+		dev_err(&spi->dev,
+			"setup: MOSI configured to idle low and high at the same time.\n");
+		return -EINVAL;
+	}
 	/*
 	 * Help drivers fail *cleanly* when they need options
 	 * that aren't supported with their current controller.
@@ -4063,33 +4054,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
 	if (list_empty(&message->transfers))
 		return -EINVAL;
 
-	/*
-	 * If an SPI controller does not support toggling the CS line on each
-	 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
-	 * for the CS line, we can emulate the CS-per-word hardware function by
-	 * splitting transfers into one-word transfers and ensuring that
-	 * cs_change is set for each transfer.
-	 */
-	if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
-					  spi_is_csgpiod(spi))) {
-		size_t maxsize = BITS_TO_BYTES(spi->bits_per_word);
-		int ret;
-
-		/* spi_split_transfers_maxsize() requires message->spi */
-		message->spi = spi;
-
-		ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
-						  GFP_KERNEL);
-		if (ret)
-			return ret;
-
-		list_for_each_entry(xfer, &message->transfers, transfer_list) {
-			/* Don't change cs_change on the last entry in the list */
-			if (list_is_last(&xfer->transfer_list, &message->transfers))
-				break;
-			xfer->cs_change = 1;
-		}
-	}
+	message->spi = spi;
 
 	/*
 	 * Half-duplex links include original MicroWire, and ones with
@@ -4168,7 +4133,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
 				return -EINVAL;
 			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
 			    xfer->tx_nbits != SPI_NBITS_DUAL &&
-			    xfer->tx_nbits != SPI_NBITS_QUAD)
+			    xfer->tx_nbits != SPI_NBITS_QUAD &&
+			    xfer->tx_nbits != SPI_NBITS_OCTAL)
 				return -EINVAL;
 			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
 			    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
@@ -4183,7 +4149,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
 				return -EINVAL;
 			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
 			    xfer->rx_nbits != SPI_NBITS_DUAL &&
-			    xfer->rx_nbits != SPI_NBITS_QUAD)
+			    xfer->rx_nbits != SPI_NBITS_QUAD &&
+			    xfer->rx_nbits != SPI_NBITS_OCTAL)
 				return -EINVAL;
 			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
 			    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
@@ -4202,6 +4169,182 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
 	return 0;
 }
 
+/*
+ * spi_split_transfers - generic handling of transfer splitting
+ * @msg: the message to split
+ *
+ * Under certain conditions, a SPI controller may not support arbitrary
+ * transfer sizes or other features required by a peripheral. This function
+ * will split the transfers in the message into smaller transfers that are
+ * supported by the controller.
+ *
+ * Controllers with special requirements not covered here can also split
+ * transfers in the optimize_message() callback.
+ *
+ * Context: can sleep
+ * Return: zero on success, else a negative error code
+ */
+static int spi_split_transfers(struct spi_message *msg)
+{
+	struct spi_controller *ctlr = msg->spi->controller;
+	struct spi_transfer *xfer;
+	int ret;
+
+	/*
+	 * If an SPI controller does not support toggling the CS line on each
+	 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
+	 * for the CS line, we can emulate the CS-per-word hardware function by
+	 * splitting transfers into one-word transfers and ensuring that
+	 * cs_change is set for each transfer.
+	 */
+	if ((msg->spi->mode & SPI_CS_WORD) &&
+	    (!(ctlr->mode_bits & SPI_CS_WORD) || spi_is_csgpiod(msg->spi))) {
+		ret = spi_split_transfers_maxwords(ctlr, msg, 1);
+		if (ret)
+			return ret;
+
+		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+			/* Don't change cs_change on the last entry in the list */
+			if (list_is_last(&xfer->transfer_list, &msg->transfers))
+				break;
+
+			xfer->cs_change = 1;
+		}
+	} else {
+		ret = spi_split_transfers_maxsize(ctlr, msg,
+						  spi_max_transfer_size(msg->spi));
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * __spi_optimize_message - shared implementation for spi_optimize_message()
+ *                          and spi_maybe_optimize_message()
+ * @spi: the device that will be used for the message
+ * @msg: the message to optimize
+ *
+ * Peripheral drivers will call spi_optimize_message() and the spi core will
+ * call spi_maybe_optimize_message() instead of calling this directly.
+ *
+ * It is not valid to call this on a message that has already been optimized.
+ *
+ * Return: zero on success, else a negative error code
+ */
+static int __spi_optimize_message(struct spi_device *spi,
+				  struct spi_message *msg)
+{
+	struct spi_controller *ctlr = spi->controller;
+	int ret;
+
+	ret = __spi_validate(spi, msg);
+	if (ret)
+		return ret;
+
+	ret = spi_split_transfers(msg);
+	if (ret)
+		return ret;
+
+	if (ctlr->optimize_message) {
+		ret = ctlr->optimize_message(msg);
+		if (ret) {
+			spi_res_release(ctlr, msg);
+			return ret;
+		}
+	}
+
+	msg->optimized = true;
+
+	return 0;
+}
+
+/*
+ * spi_maybe_optimize_message - optimize message if it isn't already pre-optimized
+ * @spi: the device that will be used for the message
+ * @msg: the message to optimize
+ * Return: zero on success, else a negative error code
+ */
+static int spi_maybe_optimize_message(struct spi_device *spi,
+				      struct spi_message *msg)
+{
+	if (spi->controller->defer_optimize_message) {
+		msg->spi = spi;
+		return 0;
+	}
+
+	if (msg->pre_optimized)
+		return 0;
+
+	return __spi_optimize_message(spi, msg);
+}
+
+/**
+ * spi_optimize_message - do any one-time validation and setup for a SPI message
+ * @spi: the device that will be used for the message
+ * @msg: the message to optimize
+ *
+ * Peripheral drivers that reuse the same message repeatedly may call this to
+ * perform as much message prep as possible once, rather than repeating it each
+ * time a message transfer is performed to improve throughput and reduce CPU
+ * usage.
+ *
+ * Once a message has been optimized, it cannot be modified with the exception
+ * of updating the contents of any xfer->tx_buf (the pointer can't be changed,
+ * only the data in the memory it points to).
+ *
+ * Calls to this function must be balanced with calls to spi_unoptimize_message()
+ * to avoid leaking resources.
+ *
+ * Context: can sleep
+ * Return: zero on success, else a negative error code
+ */
+int spi_optimize_message(struct spi_device *spi, struct spi_message *msg)
+{
+	int ret;
+
+	/*
+	 * Pre-optimization is not supported and optimization is deferred e.g.
+	 * when using spi-mux.
	 */
+	if (spi->controller->defer_optimize_message)
+		return 0;
+
+	ret = __spi_optimize_message(spi, msg);
+	if (ret)
+		return ret;
+
+	/*
+	 * This flag indicates that the peripheral driver called spi_optimize_message()
+	 * and therefore we shouldn't unoptimize message automatically when finalizing
+	 * the message but rather wait until spi_unoptimize_message() is called
+	 * by the peripheral driver.
+	 */
+	msg->pre_optimized = true;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(spi_optimize_message);
+
+/**
+ * spi_unoptimize_message - releases any resources allocated by spi_optimize_message()
+ * @msg: the message to unoptimize
+ *
+ * Calls to this function must be balanced with calls to spi_optimize_message().
+ *
+ * Context: can sleep
+ */
+void spi_unoptimize_message(struct spi_message *msg)
+{
+	if (msg->spi->controller->defer_optimize_message)
+		return;
+
+	__spi_unoptimize_message(msg);
+	msg->pre_optimized = false;
+}
+EXPORT_SYMBOL_GPL(spi_unoptimize_message);
+
 static int __spi_async(struct spi_device *spi, struct spi_message *message)
 {
 	struct spi_controller *ctlr = spi->controller;
@@ -4214,8 +4357,6 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
 	if (!ctlr->transfer)
 		return -ENOTSUPP;
 
-	message->spi = spi;
-
 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
 	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
@@ -4231,62 +4372,36 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
 	return ctlr->transfer(spi, message);
 }
 
+static void devm_spi_unoptimize_message(void *msg)
+{
+	spi_unoptimize_message(msg);
+}
+
 /**
- * spi_async - asynchronous SPI transfer
- * @spi: device with which data will be exchanged
- * @message: describes the data transfers, including completion callback
- * Context: any (IRQs may be blocked, etc)
- *
- * This call may be used in_irq and other contexts which can't sleep,
- * as well as from task contexts which can sleep.
- *
- * The completion callback is invoked in a context which can't sleep.
- * Before that invocation, the value of message->status is undefined.
- * When the callback is issued, message->status holds either zero (to
- * indicate complete success) or a negative error code. After that
- * callback returns, the driver which issued the transfer request may
- * deallocate the associated memory; it's no longer in use by any SPI
- * core or controller driver code.
- *
- * Note that although all messages to a spi_device are handled in
- * FIFO order, messages may go to different devices in other orders.
- * Some device might be higher priority, or have various "hard" access
- * time requirements, for example.
+ * devm_spi_optimize_message - managed version of spi_optimize_message()
+ * @dev: the device that manages @msg (usually @spi->dev)
+ * @spi: the device that will be used for the message
+ * @msg: the message to optimize
+ * Return: zero on success, else a negative error code
 *
- * On detection of any fault during the transfer, processing of
- * the entire message is aborted, and the device is deselected.
- * Until returning from the associated message completion callback,
- * no other spi_message queued to that device will be processed.
- * (This rule applies equally to all the synchronous transfer calls,
- * which are wrappers around this core asynchronous primitive.)
- *
- * Return: zero on success, else a negative error code.
+ * spi_unoptimize_message() will automatically be called when the device is
+ * removed.
 */
-int spi_async(struct spi_device *spi, struct spi_message *message)
+int devm_spi_optimize_message(struct device *dev, struct spi_device *spi,
+			      struct spi_message *msg)
 {
-	struct spi_controller *ctlr = spi->controller;
 	int ret;
-	unsigned long flags;
 
-	ret = __spi_validate(spi, message);
-	if (ret != 0)
+	ret = spi_optimize_message(spi, msg);
+	if (ret)
 		return ret;
 
-	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
-
-	if (ctlr->bus_lock_flag)
-		ret = -EBUSY;
-	else
-		ret = __spi_async(spi, message);
-
-	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
-
-	return ret;
+	return devm_add_action_or_reset(dev, devm_spi_unoptimize_message, msg);
 }
-EXPORT_SYMBOL_GPL(spi_async);
+EXPORT_SYMBOL_GPL(devm_spi_optimize_message);
 
 /**
- * spi_async_locked - version of spi_async with exclusive bus usage
+ * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (IRQs may be blocked, etc)
@@ -4316,25 +4431,28 @@ EXPORT_SYMBOL_GPL(spi_async);
 *
 * Return: zero on success, else a negative error code.
 */
-static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
+int spi_async(struct spi_device *spi, struct spi_message *message)
 {
 	struct spi_controller *ctlr = spi->controller;
 	int ret;
 	unsigned long flags;
 
-	ret = __spi_validate(spi, message);
-	if (ret != 0)
+	ret = spi_maybe_optimize_message(spi, message);
+	if (ret)
 		return ret;
 
 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
 
-	ret = __spi_async(spi, message);
+	if (ctlr->bus_lock_flag)
+		ret = -EBUSY;
+	else
+		ret = __spi_async(spi, message);
 
 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
 
 	return ret;
-
 }
+EXPORT_SYMBOL_GPL(spi_async);
 
 static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
 {
@@ -4383,6 +4501,7 @@ static void spi_complete(void *arg)
 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
 {
 	DECLARE_COMPLETION_ONSTACK(done);
+	unsigned long flags;
 	int status;
 	struct spi_controller *ctlr = spi->controller;
@@ -4391,12 +4510,10 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
 		return -ESHUTDOWN;
 	}
 
-	status = __spi_validate(spi, message);
-	if (status != 0)
+	status = spi_maybe_optimize_message(spi, message);
+	if (status)
 		return status;
 
-	message->spi = spi;
-
 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
 	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
 
@@ -4428,11 +4545,16 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
	 */
 	message->complete = spi_complete;
 	message->context = &done;
-	status = spi_async_locked(spi, message);
+
+	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
+	status = __spi_async(spi, message);
+	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
+
 	if (status == 0) {
 		wait_for_completion(&done);
 		status = message->status;
 	}
+	message->complete = NULL;
 	message->context = NULL;
 
 	return status;
@@ -4718,7 +4840,7 @@ extern struct notifier_block spi_of_notifier;
 #if IS_ENABLED(CONFIG_ACPI)
 static int spi_acpi_controller_match(struct device *dev, const void *data)
 {
-	return ACPI_COMPANION(dev->parent) == data;
+	return device_match_acpi_dev(dev->parent, data);
 }
 
 struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
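
The headline addition in this diff is the spi_optimize_message()/spi_unoptimize_message() pairing, which moves validation and transfer splitting out of the per-transfer hot path. Below is a minimal peripheral-driver sketch of the intended usage; the my_sensor device and its buffer layout are hypothetical, and only the spi_* calls come from the API shown in the diff above.

/* Hypothetical sketch: "my_sensor" is illustrative, not from the patch. */
#include <linux/spi/spi.h>

struct my_sensor {
	struct spi_device *spi;
	struct spi_message msg;		/* reused, pre-optimized message */
	struct spi_transfer xfer;
	u8 rx[8];
};

static int my_sensor_init_msg(struct my_sensor *sensor)
{
	spi_message_init(&sensor->msg);
	sensor->xfer.rx_buf = sensor->rx;
	sensor->xfer.len = sizeof(sensor->rx);
	spi_message_add_tail(&sensor->xfer, &sensor->msg);

	/*
	 * One-time validation and transfer splitting; after this the
	 * message layout must not change (buffer contents may).
	 */
	return spi_optimize_message(sensor->spi, &sensor->msg);
}

static int my_sensor_read(struct my_sensor *sensor)
{
	/* Hot path: no revalidation or splitting per call */
	return spi_sync(sensor->spi, &sensor->msg);
}

static void my_sensor_teardown(struct my_sensor *sensor)
{
	/* Must balance spi_optimize_message() */
	spi_unoptimize_message(&sensor->msg);
}

Drivers that prefer managed cleanup can instead call devm_spi_optimize_message(dev, spi, msg), which the diff also exports; it registers spi_unoptimize_message() as a device-managed action so the teardown call above becomes unnecessary.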
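The spi_for_each_valid_cs() macro introduced in the diff relies on the "if (!cond) {} else" loop-macro shape: the statement the caller writes after the macro becomes the else branch, so iterations with the bit clear are skipped, and a stray "else" in caller code can never pair with the macro's hidden "if". A standalone plain-C illustration of the same pattern (the for_each_set_bit8 name is hypothetical, chosen only for this sketch):

/* Filtering-loop macro pattern, shown outside the kernel for clarity. */
#include <stdio.h>

#define BIT(nr)	(1u << (nr))

/* Run the following statement only for indices whose bit is set */
#define for_each_set_bit8(mask, i)		\
	for ((i) = 0; (i) < 8; (i)++)		\
		if (!((mask) & BIT(i))) {} else

int main(void)
{
	unsigned int mask = BIT(0) | BIT(3) | BIT(5);
	int i;

	for_each_set_bit8(mask, i)
		printf("bit %d is set\n", i);	/* prints 0, 3, 5 */

	return 0;
}

Writing the filter as a bare "if (cond)" instead would compile, but an "else" placed after the macro invocation would silently bind to that hidden "if"; the empty-braces/else form closes that hole while still allowing both braced and unbraced loop bodies.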